the-stack_0_18201
import os
import cPickle
# read class file
dd = {}
f = open('ucfTrainTestlist/classInd.txt')
for line in f.readlines():
    label, name = line.split()
    dd[name.lower()] = int(label) - 1
f.close()


def generate_pkl(mode):
    # generate pkl
    path = '%s/' % mode
    savepath = '%s_pkl/' % mode
    if not os.path.exists(savepath):
        os.makedirs(savepath)
    fw = open('%s.list' % mode, 'w')
    for folder in os.listdir(path):
        vidid = folder.split('_', 1)[1]
        this_label = dd[folder.split('_')[1].lower()]
        this_feat = []
        for img in sorted(os.listdir(path + folder)):
            fout = open(path + folder + '/' + img, 'rb')
            this_feat.append(fout.read())
            fout.close()
        res = [vidid, this_label, this_feat]
        outp = open(savepath + vidid + '.pkl', 'wb')
        cPickle.dump(res, outp, protocol=cPickle.HIGHEST_PROTOCOL)
        outp.close()
        fw.write('data/%s/%s.pkl\n' % (savepath, vidid))
    fw.close()

generate_pkl('train')
generate_pkl('test')
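
# Editorial sketch, not part of the original script above: how one of the generated
# pickles could be read back for a quick sanity check. Assumes the same Python 2 /
# cPickle environment, reuses the os/cPickle imports above, and assumes
# generate_pkl('test') has already produced 'test_pkl/'. The helper name is made up.
def _inspect_first_pkl(savepath='test_pkl/'):
    first = sorted(os.listdir(savepath))[0]
    fin = open(savepath + first, 'rb')
    vidid, label, frame_features = cPickle.load(fin)
    fin.close()
    # frame_features holds the raw bytes of each per-frame feature file.
    print(vidid, label, len(frame_features))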

the-stack_0_18202
from ctypes import *
import math
import random
import os
from time import time
source_path = os.getcwd() + '/'

def sample(probs):
    s = sum(probs)
    probs = [a/s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs)-1


def c_array(ctype, values):
    arr = (ctype*len(values))()
    arr[:] = values
    return arr

class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]

lib = CDLL((source_path + "../include/libdarknet.so").encode('utf-8'), RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)

def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if (nms): do_nms_obj(dets, num, meta.classes, nms);
    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res


if __name__ == "__main__":
    set_gpu(0)
    net = load_net((source_path + "../models/yolov3.cfg").encode('utf-8'), (source_path + "../models/yolov3_17100.weights").encode('utf-8'), 0)
    meta = load_meta((source_path + "../models/coco.data").encode('utf-8'))
    start = time()
    r = detect(net, meta, (source_path + "../imgs/01.jpg").encode('utf-8'))
    print(int((time() - start) * 1000), 'ms')
    for msg in r:
        print(msg)
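
# Editorial sketch, not part of the original wrapper above. Each tuple returned by
# detect() is (class_name, confidence, (x, y, w, h)); in this wrapper the box is commonly
# interpreted as a centre point plus width/height in pixels of the input image (worth
# verifying against the darknet build in use). A small helper for turning one detection
# into corner coordinates under that assumption:
def to_corners(detection):
    name, confidence, (x, y, w, h) = detection
    return name, confidence, (x - w / 2, y - h / 2, x + w / 2, y + h / 2)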

the-stack_0_18203
import sys
import logging
from awsglue.utils import getResolvedOptions
print(logging.getLogger().handlers) # [<StreamHandler <stderr> (NOTSET)>]
args = getResolvedOptions(sys.argv, ['log_level'])
level = logging.getLevelName(args['log_level'])
logger = logging.getLogger(__name__)
logger.setLevel(level)
logger.debug('Debug')
logger.info('Info')
logger.warning('Warn')
logger.error('Error')
logger.critical('Critical')
raise ValueError('Error test')
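
# Editorial note, not part of the original Glue script above. The level name comes from
# the Glue job arguments (e.g. a job parameter --log_level with a value such as "INFO"
# or "DEBUG"); logging.getLevelName() maps that name to the numeric level handed to
# setLevel() above. A self-contained check of that mapping:
# >>> import logging
# >>> logging.getLevelName('INFO') == logging.INFO
# True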

the-stack_0_18205
"""
Utility for creating an rst table for documenting altair objects
"""
import six
import traitlets
from traitlets.utils.importstring import import_item
from altair.api import TopLevelMixin
try:
    # Altair > 1.2
    from altair.v1.schema._interface import named_channels, channel_collections
except:
    # Altair <= 1.2
    from altair.schema import named_channels, channel_collections

__all__ = ['altair_rst_table']

# This holds info for how to build links to the Vega-Lite documentation
VEGALITE_DOC_URL = 'http://vega.github.io/vega-lite/docs/'
VEGALITE_DOC_PAGES = {'config': 'config.html#top-level-config',
                      'cellconfig': 'config.html#cell-config',
                      'markconfig': 'config.html#mark-config',
                      'scaleconfig': 'config.html#scale-config',
                      'axisconfig': 'config.html#axis-config',
                      'legendconfig': 'config.html#legend-config',
                      'facetconfig': 'config.html#facet-config'}

for attr in ['data', 'transform', 'mark', 'encoding', 'aggregate', 'bin',
             'sort', 'timeunit', 'scale', 'axis', 'legend']:
    VEGALITE_DOC_PAGES[attr] = attr + '.html'

for channel in ['color', 'column', 'detail', 'opacity', 'order', 'path',
                'row', 'shape', 'size', 'text', 'x', 'y']:
    VEGALITE_DOC_PAGES[channel] = 'encoding.html#def'

def _get_trait_info(name, trait):
    """Get a dictionary of info for an object trait"""
    type_ = trait.info()
    help_ = trait.help

    if isinstance(trait, traitlets.List):
        trait_info = _get_trait_info('', trait._trait)
        type_ = 'list of {0}'.format(trait_info['type'])
    elif isinstance(trait, traitlets.Enum):
        values = trait.values
        if all(isinstance(val, str) for val in values):
            type_ = 'string'
            help_ += ' One of {0}.'.format(values)
    elif isinstance(trait, traitlets.Union):
        trait_info = [_get_trait_info('', t) for t in trait.trait_types]
        type_ = ' or '.join(info['type'] for info in trait_info)
        help_ += '/'.join([info['help'] for info in trait_info
                           if info['help'] != '--'])
    elif isinstance(trait, traitlets.Instance):
        cls = trait.klass
        if isinstance(cls, six.string_types):
            cls = import_item(cls)
        if issubclass(cls, traitlets.HasTraits):
            type_ = ':class:`~altair.{0}`'.format(cls.__name__)

    type_ = type_.replace('a ', '')
    type_ = type_.replace('unicode string', 'string')
    return {'name': name, 'help': help_ or '--', 'type': type_ or '--'}

def _get_category(obj):
    """Get the category of an altair object"""
    name = obj.__name__
    if 'Chart' in name:
        return (0, 'Top Level Objects')
    elif name in dir(named_channels):
        return (2, 'Encoding Channels')
    elif name in dir(channel_collections):
        # out of order because encoding channels also appear here
        return (1, 'Channel Collections')
    elif 'Config' in name:
        return (3, 'Config Objects')
    else:
        return (4, 'Other Objects')

def _get_object_info(obj):
    """Get a dictionary of info for an object, suitable for the template"""
    D = {}
    name = obj.__name__
    D['name'] = name

    if name.lower() in VEGALITE_DOC_PAGES:
        url = VEGALITE_DOC_URL + VEGALITE_DOC_PAGES[name.lower()]
        D['description'] = ("(See also Vega-Lite's Documentation for "
                            "`{0} <{1}>`_)".format(name, url))

    D['traits'] = [_get_trait_info(name, trait)
                   for name, trait in sorted(obj.class_traits().items())]
    D['category'] = _get_category(obj)

    return D

def altair_rst_table(obj, columns=None, title_map=None,
                     include_description=False):
    obj_info = _get_object_info(obj)
    columns = columns or ['name', 'type', 'help']
    title_map = title_map or {'name': 'Trait', 'type': 'Type',
                              'help': 'Description'}
    rows = [[trait[column] for column in columns]
            for trait in obj_info['traits']]
    titles = [title_map.get(column, column) for column in columns]
    lengths = [[len(item) for item in row] for row in rows]
    maxlengths = [max(col) for col in zip(*lengths)]

    def pad(row, fill=' '):
        return ' '.join(item.ljust(length, fill)
                        for item, length in zip(row, maxlengths))

    div = pad(['', '', ''], fill='=')

    lines = ['']
    if include_description and 'description' in obj_info:
        lines.extend([obj_info['description'], ''])
    lines.extend(['', div, pad(titles), div])
    lines.extend(map(pad, rows))
    lines.extend([div, '', ''])
    return lines
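
# Editorial sketch, not part of the original utility above: rough intended use, assuming
# the legacy Altair 1.x API this module targets, rendering one traitlets-based Altair
# class as an RST grid table.
# from altair import Chart
# print('\n'.join(altair_rst_table(Chart, include_description=True)))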

the-stack_0_18206
"""Interface for the DC-EGM algorithm."""
import copy
from functools import partial
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
from dcegm.egm_step import do_egm_step
from dcegm.upper_envelope_step import do_upper_envelope_step
from scipy import interpolate
from scipy.special.orthogonal import roots_sh_legendre
from scipy.stats import norm
def solve_dcegm(
params: pd.DataFrame,
options: Dict[str, int],
utility_functions: Dict[str, callable],
compute_expected_value: Callable,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Solves a discrete-continuous life-cycle model using the DC-EGM algorithm.
EGM stands for Endogenous Grid Method.
Args:
params (pd.DataFrame): Model parameters indexed with multi-index of the
form ("category", "name") and two columns ["value", "comment"].
options (dict): Options dictionary.
utility_functions (Dict[str, callable]): Dictionary of three user-supplied
functions for computation of (i) utility, (ii) inverse marginal utility,
and (iii) next period marginal utility.
compute_expected_value (callable): User-supplied functions for computation
of the agent's expected value.
Returns:
(tuple): Tuple containing
- policy (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific policy function; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the policy function
c(M, d), for each time period and each discrete choice.
- value (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific value functions; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the value function
v(M, d), for each time period and each discrete choice.
"""
max_wealth = params.loc[("assets", "max_wealth"), "value"]
n_periods = options["n_periods"]
n_choices = options["n_discrete_choices"]
n_grid_wealth = options["grid_points_wealth"]
n_quad_points = options["quadrature_points_stochastic"]
# If only one state, i.e. no discrete choices to make,
# set choice_range to 1 = "working".
choice_range = [1] if n_choices < 2 else range(n_choices)
savings_grid = np.linspace(0, max_wealth, n_grid_wealth)
# Gauss-Legendre (shifted) quadrature over the interval [0,1].
# Standard Gauss-Legendre quadrature (scipy.special.roots_legendre)
# integrates over [-1, 1].
quad_points, quad_weights = roots_sh_legendre(n_quad_points)
quad_points_normal = norm.ppf(quad_points)
exogenous_grid = {
"savings": savings_grid,
"quadrature_points": quad_points_normal,
"quadrature_weights": quad_weights,
}
# Create nested lists for consumption policy and value function.
# We cannot use multi-dim np.ndarrays here, since the length of
# the grid is altered by the Upper Envelope step!
policy_arr, value_arr = _create_multi_dim_arrays(options)
policy_arr, value_arr = solve_final_period(
policy_arr,
value_arr,
savings_grid=savings_grid,
params=params,
options=options,
compute_utility=utility_functions["utility"],
)
# Make new function or move inside func:`solve_final_period`
current_policy_function, current_value_function = {}, {}
for index, state in enumerate(choice_range):
final_policy = policy_arr[n_periods - 1, index, :][
:,
~np.isnan(policy_arr[n_periods - 1, index, :]).any(axis=0),
]
current_policy_function[state] = partial(
interpolate_policy,
policy=final_policy,
)
current_value_function[state] = partial(
utility_functions["utility"], state=state, params=params
)
# Start backwards induction from second to last period (T - 1)
for period in range(n_periods - 2, -1, -1):
# Update and reset dictionaries
next_period_policy_function = current_policy_function
next_period_value_function = current_value_function
current_policy_function, current_value_function = {}, {}
for index, state in enumerate(choice_range):
current_policy, current_value, expected_value = do_egm_step(
period,
state,
params=params,
options=options,
exogenous_grid=exogenous_grid,
utility_functions=utility_functions,
compute_expected_value=compute_expected_value,
next_period_policy_function=next_period_policy_function,
next_period_value_function=next_period_value_function,
)
if state >= 1 and n_choices > 1:
current_policy, current_value = do_upper_envelope_step(
current_policy,
current_value,
expected_value=expected_value,
params=params,
options=options,
compute_utility=utility_functions["utility"],
)
else:
pass
current_value_function[state] = partial(
interpolate_value,
value=current_value,
state=state,
params=params,
compute_utility=utility_functions["utility"],
)
current_policy_function[state] = partial(
interpolate_policy,
policy=current_policy,
)
# Store
policy_arr[period, index, :, : current_policy.shape[1]] = current_policy
value_arr[period, index, :, : current_value.shape[1]] = current_value
return policy_arr, value_arr
def interpolate_policy(flat_wealth: np.ndarray, policy: np.ndarray) -> np.ndarray:
"""Interpolate the agent's policy for given flat wealth matrix.
Args:
flat_wealth (np.ndarray): Flat array of shape
(n_quad_stochastic *n_grid_wealth,) containing the agent's
potential wealth matrix in given period.
policy (np.ndarray): Policy array of shape (2, 1.1 * n_grid_wealth).
Position [0, :] of the array contains the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the (consumption) policy
function c(M, d), for each time period and each discrete choice.
"""
policy = policy[:, ~np.isnan(policy).any(axis=0)]
policy_interp = np.empty(flat_wealth.shape)
interpolation_func = interpolate.interp1d(
x=policy[0, :],
y=policy[1, :],
bounds_error=False,
fill_value="extrapolate",
kind="linear",
)
policy_interp = interpolation_func(flat_wealth)
return policy_interp
def interpolate_value(
flat_wealth: np.ndarray,
value: np.ndarray,
state: int,
params: pd.DataFrame,
compute_utility: Callable,
) -> np.ndarray:
"""Interpolate the agent's value for given flat wealth matrix.
Args:
flat_wealth (np.ndarray): Flat array of shape
(n_quad_stochastic *n_grid_wealth,) containing the agent's
potential wealth matrix in given period.
value (np.ndarray): Value array of shape (2, 1.1* n_grid_wealth).
Position [0, :] of the array contains the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the value function v(M, d),
for each time period and each discrete choice.
state (int): State of the agent, e.g. 0 = "retirement", 1 = "working".
params (pd.DataFrame): Model parameters indexed with multi-index of the
form ("category", "name") and two columns ["value", "comment"].
compute_utility (callable): Function for computation of agent's utility.
"""
value = value[:, ~np.isnan(value).any(axis=0)]
value_interp = np.empty(flat_wealth.shape)
# Mark credit constrained region
constrained_region = flat_wealth < value[0, 1]
# Calculate t+1 value function in constrained region using
# the analytical part
value_interp[constrained_region] = _get_value_constrained(
flat_wealth[constrained_region],
next_period_value=value[1, 0],
state=state,
params=params,
compute_utility=compute_utility,
)
# Calculate t+1 value function in non-constrained region
# via inter- and extrapolation
interpolation_func = interpolate.interp1d(
x=value[0, :], # endogenous wealth grid
y=value[1, :], # value_function
bounds_error=False,
fill_value="extrapolate",
kind="linear",
)
value_interp[~constrained_region] = interpolation_func(
flat_wealth[~constrained_region]
)
return value_interp
def solve_final_period(
policy: np.ndarray,
value: np.ndarray,
savings_grid: np.ndarray,
*,
params: pd.DataFrame,
options: Dict[str, int],
compute_utility: Callable,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Computes solution to final period for policy and value function.
Args:
policy (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific policy function; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the policy function
c(M, d), for each time period and each discrete choice.
value (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific value functions; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the value function
v(M, d), for each time period and each discrete choice.
savings_grid (np.ndarray): Array of shape (n_wealth_grid,) denoting the
exogenous savings grid.
params (pd.DataFrame): Model parameters indexed with multi-index of the
form ("category", "name") and two columns ["value", "comment"].
options (dict): Options dictionary.
compute_utility (callable): Function for computation of agent's utility.
Returns:
(tuple): Tuple containing
- policy (List[np.ndarray]): Nested list of np.ndarrays storing the
choice-specific consumption policies with the solution for the final
period included.
- value (List[np.ndarray]): Nested list of np.ndarrays storing the
choice-specific value functions with the solution for the final period
included.
"""
n_periods = options["n_periods"]
n_choices = options["n_discrete_choices"]
choice_range = [1] if n_choices < 2 else range(n_choices)
# In last period, nothing is saved for the next period (since there is none).
# Hence, everything is consumed, c_T(M, d) = M
end_grid = savings_grid.shape[0] + 1
for state_index, state in enumerate(choice_range):
policy[n_periods - 1, state_index, 0, 1:end_grid] = copy.deepcopy(
savings_grid
) # M
policy[n_periods - 1, state_index, 1, 1:end_grid] = copy.deepcopy(
policy[n_periods - 1, state_index, 0, 1:end_grid]
) # c(M, d)
policy[n_periods - 1, state_index, 0, 0] = 0
policy[n_periods - 1, state_index, 1, 0] = 0
value[n_periods - 1, state_index, 0, 2:end_grid] = compute_utility(
policy[n_periods - 1, state_index, 0, 2:end_grid], state, params
)
value[n_periods - 1, state_index][1, 2:end_grid] = compute_utility(
policy[n_periods - 1, state_index, 1, 2:end_grid], state, params
)
value[n_periods - 1, state_index, 0, 0] = 0
value[n_periods - 1, state_index, :, 2] = 0
return policy, value
def _create_multi_dim_arrays(
options: Dict[str, int]
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
"""Create multi-diminesional array for storing the policy and value function.
Note that we add 10% extra space filled with nans, since, in the upper
envelope step, the endogenous wealth grid might be augmented to the left
in order to accurately describe potential non-monotonicities (and hence
discontinuities) near the start of the grid.
We include one additional grid point (n_grid_wealth + 1) to M,
since we want to set the first position (j=0) to M_t = 0 for all time
periods.
Moreover, the lists have variable length, because the Upper Envelope step
drops suboptimal points from the original grid and adds new ones (kink
points as well as the corresponding interpolated values of the consumption
and value functions).
Args:
options (dict): Options dictionary.
Returns:
(tuple): Tuple containing
- policy (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific policy function; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the policy function
c(M, d), for each time period and each discrete choice.
- value (np.ndarray): Multi-dimensional np.ndarray storing the
choice-specific value functions; of shape
[n_periods, n_discrete_choices, 2, 1.1 * n_grid_wealth].
Position [.., 0, :] contains the endogenous grid over wealth M,
and [.., 1, :] stores the corresponding value of the value function
v(M, d), for each time period and each discrete choice.
"""
n_grid_wealth = options["grid_points_wealth"]
n_periods = options["n_periods"]
n_choices = options["n_discrete_choices"]
policy_arr = np.empty((n_periods, n_choices, 2, int(1.1 * n_grid_wealth)))
value_arr = np.empty((n_periods, n_choices, 2, int(1.1 * n_grid_wealth)))
policy_arr[:] = np.nan
value_arr[:] = np.nan
return policy_arr, value_arr
def _get_value_constrained(
wealth: np.ndarray,
next_period_value: np.ndarray,
state: int,
params: pd.DataFrame,
compute_utility: Callable,
) -> np.ndarray:
""" "Compute the agent's value in the credit constrained region."""
beta = params.loc[("beta", "beta"), "value"]
utility = compute_utility(wealth, state, params)
value_constrained = utility + beta * next_period_value
return value_constrained
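
# Editorial sketch, not part of the original module above: illustration of the (2, n)
# array convention expected by interpolate_policy(). Row 0 is the endogenous wealth grid,
# row 1 the consumption policy on that grid; the numbers are made up purely to show the
# shapes (np is the numpy import at the top of this module).
_example_policy = np.array(
    [[0.0, 1.0, 2.0, 4.0],   # endogenous wealth grid M
     [0.0, 0.5, 0.9, 1.5]]   # consumption c(M, d) on that grid
)
_example_interp = interpolate_policy(np.array([0.5, 3.0]), _example_policy)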

the-stack_0_18209
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from parlai.core.params import ParlaiParser
from parlai.mturk.core.mturk_manager import MTurkManager
from worlds import TalkTheWalkWorld, InstructionWorld
from task_config import task_config
"""
This task consists of two local human agents and two MTurk agents,
chatting with each other in a free-form format.
You can end the conversation by sending a message ending with
`[DONE]` from human_1.
"""
def main():
    argparser = ParlaiParser(False, False)
    argparser.add_parlai_data_path()
    argparser.add_mturk_args()
    argparser.add_argument('--replay', action='store_true',
                           help='Set to replay old interactions')
    argparser.add_argument('--replay-log-file', type=str, default='',
                           help='location of log to use if replay')
    argparser.add_argument('--real-time', action='store_true',
                           help='Set to replay in real time ')
    argparser.add_argument('--replay-bot', action='store_true',
                           help='Set to replay bot actions instead of human')
    argparser.add_argument('--model-file', type=str, default='',
                           help='language generator model file')
    argparser.add_argument('--world-idx', type=int, default=-1,
                           help='specify world to load')
    argparser.add_argument('--start-idx', type=int, default=0,
                           help='where to start replay, if replaying actions')
    argparser.add_argument('--bot-type', type=str, default='discrete',
                           choices=['discrete', 'natural'],
                           help='which bot log to use')
    opt = argparser.parse_args()
    opt.update(task_config)

    mturk_agent_1_id = 'Tourist'
    mturk_agent_2_id = 'Guide'
    mturk_agent_ids = [mturk_agent_1_id, mturk_agent_2_id]
    task_directory_path = os.path.dirname(os.path.abspath(__file__))
    opt['task'] = os.path.basename(task_directory_path)
    opt['data_path'] = os.getcwd() + '/data/' + opt['task']
    mturk_manager = MTurkManager(opt=opt,
                                 mturk_agent_ids=mturk_agent_ids)
    mturk_manager.setup_server(task_directory_path=task_directory_path)
    try:
        mturk_manager.start_new_run()
        mturk_manager.create_hits()

        def run_onboard(worker):
            world = InstructionWorld(opt=opt, mturk_agent=worker)
            while not world.episode_done():
                world.parley()
            world.shutdown()

        mturk_manager.set_onboard_function(onboard_function=run_onboard)
        mturk_manager.ready_to_accept_workers()

        def check_worker_eligibility(worker):
            return True

        global worker_count
        worker_count = 0

        def assign_worker_roles(workers):
            workers[0].id = mturk_agent_ids[0]
            workers[1].id = mturk_agent_ids[1]
            return [workers[0], workers[1]]

        def run_conversation(mturk_manager, opt, workers):
            # Create mturk agents
            mturk_agent_1 = workers[0]
            mturk_agent_2 = workers[1]
            conv_idx = mturk_manager.conversation_index
            world = TalkTheWalkWorld(opt=opt,
                                     agents=[mturk_agent_1, mturk_agent_2],
                                     world_tag=conv_idx)
            while not world.episode_done():
                world.parley()
            world.shutdown()
            world.review_work()
            if not opt.get('replay'):
                world.save()

        mturk_manager.start_task(
            eligibility_function=check_worker_eligibility,
            assign_role_function=assign_worker_roles,
            task_function=run_conversation
        )
    except Exception:
        raise
    finally:
        mturk_manager.expire_all_unassigned_hits()
        mturk_manager.shutdown()


if __name__ == '__main__':
    main()

the-stack_0_18211
import numpy as np
import torch
from spotlight.torch_utils import gpu
def _predict_process_ids(user_ids, item_ids, num_items, use_cuda):

    if item_ids is None:
        item_ids = np.arange(num_items, dtype=np.int64)

    if np.isscalar(user_ids):
        user_ids = np.array(user_ids, dtype=np.int64)

    user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))
    item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))

    if item_ids.size()[0] != user_ids.size(0):
        user_ids = user_ids.expand(item_ids.size())

    user_var = gpu(user_ids, use_cuda)
    item_var = gpu(item_ids, use_cuda)

    return user_var.squeeze(), item_var.squeeze()
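
# Editorial sketch, not part of the original module above: expected behaviour of the
# helper. A scalar user id is broadcast over all items when item_ids is None, so both
# returned tensors have length num_items.
_user_var, _item_var = _predict_process_ids(3, None, num_items=5, use_cuda=False)
assert _user_var.shape == _item_var.shape == (5,)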

the-stack_0_18212
"""SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py 2014/09/27 12:51:43 garyo"
import re
import SCons.Node.FS
import SCons.Util
class _Null(object):
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw)
class FindPathDirs(object):
"""A class to bind a specific *PATH variable name to a function that
will return all of the *path directories."""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
dir = dir or env.fs._cwd
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(dir.Rfindalldirs(path))
class Base(object):
"""
The base class for dependency scanners. This implements
straightforward, single-pass scanning of a single file.
"""
def __init__(self,
function,
name = "NONE",
argument = _null,
skeys = _null,
path_function = None,
# Node.FS.Base so that, by default, it's okay for a
# scanner to return a Dir, File or Entry.
node_class = SCons.Node.FS.Base,
node_factory = None,
scan_check = None,
recursive = None):
"""
Construct a new scanner object given a scanner function.
'function' - a scanner function taking two or three
arguments and returning a list of strings.
'name' - a name for identifying this scanner object.
'argument' - an optional argument that, if specified, will be
passed to both the scanner function and the path_function.
'skeys' - an optional list argument that can be used to determine
which scanner should be used for a given Node. In the case of File
nodes, for example, the 'skeys' would be file suffixes.
'path_function' - a function that takes four or five arguments
(a construction environment, Node for the directory containing
the SConscript file that defined the primary target, list of
target nodes, list of source nodes, and optional argument for
this instance) and returns a tuple of the directories that can
be searched for implicit dependency files. May also return a
callable() which is called with no args and returns the tuple
(supporting Bindable class).
'node_class' - the class of Nodes which this scan will return.
If node_class is None, then this scanner will not enforce any
Node conversion and will return the raw results from the
underlying scanner function.
'node_factory' - the factory function to be called to translate
the raw results returned by the scanner function into the
expected node_class objects.
'scan_check' - a function to be called to first check whether
this node really needs to be scanned.
'recursive' - specifies that this scanner should be invoked
recursively on all of the implicit dependencies it returns
(the canonical example being #include lines in C source files).
May be a callable, which will be called to filter the list
of nodes found to select a subset for recursive scanning
(the canonical example being only recursively scanning
subdirectories within a directory).
The scanner function's first argument will be a Node that should
be scanned for dependencies, the second argument will be an
Environment object, the third argument will be the tuple of paths
returned by the path_function, and the fourth argument will be
the value passed into 'argument', and the returned list should
contain the Nodes for all the direct dependencies of the file.
Examples:
s = Scanner(my_scanner_function)
s = Scanner(function = my_scanner_function)
s = Scanner(function = my_scanner_function, argument = 'foo')
"""
# Note: this class could easily work with scanner functions that take
# something other than a filename as an argument (e.g. a database
# node) and a dependencies list that aren't file names. All that
# would need to be changed is the documentation.
self.function = function
self.path_function = path_function
self.name = name
self.argument = argument
if skeys is _null:
if SCons.Util.is_Dict(function):
skeys = list(function.keys())
else:
skeys = []
self.skeys = skeys
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
if callable(recursive):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
else:
self.recurse_nodes = self._recurse_no_nodes
def path(self, env, dir=None, target=None, source=None):
if not self.path_function:
return ()
if not self.argument is _null:
return self.path_function(env, dir, target, source, self.argument)
else:
return self.path_function(env, dir, target, source)
def __call__(self, node, env, path = ()):
"""
This method scans a single object. 'node' is the node
that will be passed to the scanner function, and 'env' is the
environment that will be passed to the scanner function. A list of
direct dependency nodes for the specified node will be returned.
"""
if self.scan_check and not self.scan_check(node, env):
return []
self = self.select(node)
if not self.argument is _null:
list = self.function(node, env, path, self.argument)
else:
list = self.function(node, env, path)
kw = {}
if hasattr(node, 'dir'):
kw['directory'] = node.dir
node_factory = env.get_factory(self.node_factory)
nodes = []
for l in list:
if self.node_class and not isinstance(l, self.node_class):
l = node_factory(l, **kw)
nodes.append(l)
return nodes
def __cmp__(self, other):
try:
return cmp(self.__dict__, other.__dict__)
except AttributeError:
# other probably doesn't have a __dict__
return cmp(self.__dict__, other)
def __hash__(self):
return id(self)
def __str__(self):
return self.name
def add_skey(self, skey):
"""Add a skey to the list of skeys"""
self.skeys.append(skey)
def get_skeys(self, env=None):
if env and SCons.Util.is_String(self.skeys):
return env.subst_list(self.skeys)[0]
return self.skeys
def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except KeyError:
return None
else:
return self
def _recurse_all_nodes(self, nodes):
return nodes
def _recurse_no_nodes(self, nodes):
return []
recurse_nodes = _recurse_no_nodes
def add_scanner(self, skey, scanner):
self.function[skey] = scanner
self.add_skey(skey)
class Selector(Base):
"""
A class for selecting a more specific scanner based on the
scanner_key() (suffix) for a specific Node.
TODO: This functionality has been moved into the inner workings of
the Base class, and this class will be deprecated at some point.
(It was never exposed directly as part of the public interface,
although it is used by the Scanner() factory function that was
used by various Tool modules and therefore was likely a template
for custom modules that may be out there.)
"""
def __init__(self, dict, *args, **kw):
Base.__init__(self, None, *args, **kw)
self.dict = dict
self.skeys = list(dict.keys())
def __call__(self, node, env, path = ()):
return self.select(node)(node, env, path)
def select(self, node):
try:
return self.dict[node.scanner_key()]
except KeyError:
return None
def add_scanner(self, skey, scanner):
self.dict[skey] = scanner
self.add_skey(skey)
class Current(Base):
"""
A class for scanning files that are source files (have no builder)
or are derived files and are current (which implies that they exist,
either locally or in a repository).
"""
def __init__(self, *args, **kw):
def current_check(node, env):
return not node.has_builder() or node.is_up_to_date()
kw['scan_check'] = current_check
Base.__init__(self, *args, **kw)
class Classic(Current):
"""
A Scanner subclass to contain the common logic for classic CPP-style
include scanning, but which can be customized to use different
regular expressions to find the includes.
Note that in order for this to work "out of the box" (without
overriding the find_include() and sort_key() methods), the regular
expression passed to the constructor must return the name of the
include file in group 0.
"""
def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
self.cre = re.compile(regex, re.M)
def _scan(node, env, path=(), self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, path)
kw['function'] = _scan
kw['path_function'] = FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
Current.__init__(self, *args, **kw)
def find_include(self, include, source_dir, path):
n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
return n, include
def sort_key(self, include):
return SCons.Node.FS._my_normcase(include)
def find_include_names(self, node):
return self.cre.findall(node.get_text_contents())
def scan(self, node, path=()):
# cache the includes list in node so we only scan it once:
if node.includes is not None:
includes = node.includes
else:
includes = self.find_include_names (node)
# Intern the names of the include files. Saves some memory
# if the same header is included many times.
node.includes = list(map(SCons.Util.silent_intern, includes))
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the #include line (including the
# " or <, since that may affect what file is found), which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
nodes.append((self.sort_key(include), n))
return [pair[1] for pair in sorted(nodes)]
class ClassicCPP(Classic):
"""
A Classic Scanner subclass which takes into account the type of
bracketing used to include the file, and uses classic CPP rules
for searching for the files based on the bracketing.
Note that in order for this to work, the regular expression passed
to the constructor must return the leading bracket in group 0, and
the contained filename in group 1.
"""
def find_include(self, include, source_dir, path):
if include[0] == '"':
paths = (source_dir,) + tuple(path)
else:
paths = tuple(path) + (source_dir,)
n = SCons.Node.FS.find_file(include[1], paths)
i = SCons.Util.silent_intern(include[1])
return n, i
def sort_key(self, include):
return SCons.Node.FS._my_normcase(' '.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
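
# Editorial sketch, not part of the original module above: how a classic include-style
# scanner is typically assembled from this module. The suffix, construction variable and
# regex below are made-up placeholders for a hypothetical language whose sources contain
# lines like: include "other.foo"
FooScanner = Classic("FooScanner",
                     ['.foo'],
                     'FOOPATH',
                     r'^include\s+"(\S+)"')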

the-stack_0_18216
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Collection of tests for :mod:`orion.storage`."""
import copy
import json
import logging
import os
import tempfile
import pytest
from orion.core.io.database import database_factory
from orion.core.io.database.pickleddb import PickledDB
from orion.core.utils.exceptions import MissingResultFile
from orion.core.utils.singleton import (
SingletonAlreadyInstantiatedError,
SingletonNotInstantiatedError,
update_singletons,
)
from orion.core.worker.trial import Trial
from orion.storage.base import FailedUpdate
from orion.storage.legacy import get_database, setup_database
from orion.testing import OrionState
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
base_experiment = {
"name": "default_name",
"version": 0,
"metadata": {
"user": "default_user",
"user_script": "abc",
"datetime": "2017-11-23T02:00:00",
},
}
base_trial = {
"experiment": "default_name",
"status": "new", # new, reserved, suspended, completed, broken
"worker": None,
"submit_time": "2017-11-23T02:00:00",
"start_time": None,
"end_time": None,
"heartbeat": None,
"results": [
{"name": "loss", "type": "objective", "value": 2} # objective, constraint
],
"params": [
{"name": "/encoding_layer", "type": "categorical", "value": "rnn"},
{
"name": "/decoding_layer",
"type": "categorical",
"value": "lstm_with_attention",
},
],
}
mongodb_config = {
"database": {
"type": "MongoDB",
"name": "orion_test",
"username": "user",
"password": "pass",
}
}
db_backends = [{"type": "legacy", "database": mongodb_config}]
@pytest.mark.usefixtures("setup_pickleddb_database")
def test_setup_database_default(monkeypatch):
"""Test that database is setup using default config"""
update_singletons()
setup_database()
database = database_factory.create()
assert isinstance(database, PickledDB)
def test_setup_database_bad():
"""Test how setup fails when configuring with non-existant backends"""
update_singletons()
with pytest.raises(NotImplementedError) as exc:
setup_database({"type": "idontexist"})
assert exc.match("idontexist")
def test_setup_database_custom():
"""Test setup with local configuration"""
update_singletons()
setup_database({"type": "pickleddb", "host": "test.pkl"})
database = database_factory.create()
assert isinstance(database, PickledDB)
assert database.host == os.path.abspath("test.pkl")
def test_setup_database_bad_override():
"""Test setup with different type than existing singleton"""
update_singletons()
setup_database({"type": "pickleddb", "host": "test.pkl"})
database = database_factory.create()
assert isinstance(database, PickledDB)
with pytest.raises(SingletonAlreadyInstantiatedError) as exc:
setup_database({"type": "mongodb"})
assert exc.match("A singleton instance of \(type: Database\)")
def test_setup_database_bad_config_override():
"""Test setup with different config than existing singleton"""
update_singletons()
setup_database({"type": "pickleddb", "host": "test.pkl"})
database = database_factory.create()
assert isinstance(database, PickledDB)
with pytest.raises(SingletonAlreadyInstantiatedError):
setup_database({"type": "pickleddb", "host": "other.pkl"})
def test_get_database_uninitiated():
"""Test that get database fails if no database singleton exist"""
update_singletons()
with pytest.raises(SingletonNotInstantiatedError) as exc:
get_database()
assert exc.match("No singleton instance of \(type: Database\) was created")
def test_get_database():
"""Test that get database gets the singleton"""
update_singletons()
setup_database({"type": "pickleddb", "host": "test.pkl"})
database = get_database()
assert isinstance(database, PickledDB)
assert get_database() == database
class TestLegacyStorage:
"""Test Legacy Storage retrieve result mechanic separately"""
def test_push_trial_results(self, storage=None):
"""Successfully push a completed trial into database."""
reserved_trial = copy.deepcopy(base_trial)
reserved_trial["status"] = "reserved"
with OrionState(
experiments=[], trials=[reserved_trial], storage=storage
) as cfg:
storage = cfg.storage()
trial = storage.get_trial(Trial(**reserved_trial))
results = [Trial.Result(name="loss", type="objective", value=2)]
trial.results = results
assert storage.push_trial_results(trial), "should update successfully"
trial2 = storage.get_trial(trial)
assert trial2.results == results
def test_push_trial_results_unreserved(self, storage=None):
"""Successfully push a completed trial into database."""
with OrionState(experiments=[], trials=[base_trial], storage=storage) as cfg:
storage = cfg.storage()
trial = storage.get_trial(Trial(**base_trial))
results = [Trial.Result(name="loss", type="objective", value=2)]
trial.results = results
with pytest.raises(FailedUpdate):
storage.push_trial_results(trial)

the-stack_0_18217
# Copyright (C) 2019-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
from glob import iglob
from typing import (
Any, Callable, Dict, Iterator, List, NoReturn, Optional, Sequence, Tuple,
TypeVar,
)
import os
import os.path as osp
from attr import attrs, define, field
import attr
import numpy as np
from datumaro.components.annotation import (
Annotation, AnnotationType, Categories, IndexMaskImage,
)
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.errors import (
AnnotationImportError, DatasetNotFoundError, DatumaroError, ItemImportError,
)
from datumaro.components.format_detection import (
FormatDetectionConfidence, FormatDetectionContext,
)
from datumaro.components.media import Image
from datumaro.components.progress_reporting import (
NullProgressReporter, ProgressReporter,
)
from datumaro.util import is_method_redefined
from datumaro.util.attrs_util import default_if_none, not_empty
DEFAULT_SUBSET_NAME = 'default'
@attrs(slots=True, order=False, eq=False)
class DatasetItem:
id: str = field(converter=lambda x: str(x).replace('\\', '/'),
validator=not_empty)
annotations: List[Annotation] = field(
factory=list, validator=default_if_none(list))
subset: str = field(converter=lambda v: v or DEFAULT_SUBSET_NAME,
default=None)
# TODO: introduce "media" field with type info. Replace image and pcd.
image: Optional[Image] = field(default=None)
# TODO: introduce pcd type like Image
point_cloud: Optional[str] = field(
converter=lambda x: str(x).replace('\\', '/') if x else None,
default=None)
related_images: List[Image] = field(default=None)
# Class mask for semantic segmentation
class_mask: Optional[IndexMaskImage] = field(default=None)
def __eq__(self, other):
if other.__class__ is not self.__class__:
return False
return (
self.id,
self.annotations,
self.subset,
self.image,
self.point_cloud,
self.related_images,
self.attributes,
) == (
other.id,
other.annotations,
other.subset,
other.image,
other.point_cloud,
other.related_images,
other.attributes,
) and np.array_equal(self.class_mask, other.class_mask)
def __attrs_post_init__(self):
if (self.has_image and self.has_point_cloud):
raise ValueError("Can't set both image and point cloud info")
if self.related_images and not self.has_point_cloud:
raise ValueError("Related images require point cloud")
def _image_converter(image):
if callable(image) or isinstance(image, np.ndarray):
image = Image(data=image)
elif isinstance(image, str):
image = Image(path=image)
assert image is None or isinstance(image, Image), type(image)
return image
image.converter = _image_converter
def _related_image_converter(images):
return list(map(__class__._image_converter, images or []))
related_images.converter = _related_image_converter
@point_cloud.validator
def _point_cloud_validator(self, attribute, pcd):
assert pcd is None or isinstance(pcd, str), type(pcd)
attributes: Dict[str, Any] = field(
factory=dict, validator=default_if_none(dict))
@property
def has_image(self):
return self.image is not None
@property
def has_point_cloud(self):
return self.point_cloud is not None
def wrap(item, **kwargs):
return attr.evolve(item, **kwargs)
CategoriesInfo = Dict[AnnotationType, Categories]
class IExtractor:
def __iter__(self) -> Iterator[DatasetItem]:
raise NotImplementedError()
def __len__(self) -> int:
raise NotImplementedError()
def __bool__(self): # avoid __len__ use for truth checking
return True
def subsets(self) -> Dict[str, IExtractor]:
raise NotImplementedError()
def get_subset(self, name) -> IExtractor:
raise NotImplementedError()
def categories(self) -> CategoriesInfo:
raise NotImplementedError()
def get(self, id, subset=None) -> Optional[DatasetItem]:
raise NotImplementedError()
class _ExtractorBase(IExtractor):
def __init__(self, *, length=None, subsets=None):
self._length = length
self._subsets = subsets
def _init_cache(self):
subsets = set()
length = -1
for length, item in enumerate(self):
subsets.add(item.subset)
length += 1
if self._length is None:
self._length = length
if self._subsets is None:
self._subsets = subsets
def __len__(self):
if self._length is None:
self._init_cache()
return self._length
def subsets(self) -> Dict[str, IExtractor]:
if self._subsets is None:
self._init_cache()
return {name or DEFAULT_SUBSET_NAME: self.get_subset(name)
for name in self._subsets}
def get_subset(self, name):
if self._subsets is None:
self._init_cache()
if name in self._subsets:
if len(self._subsets) == 1:
return self
subset = self.select(lambda item: item.subset == name)
subset._subsets = [name]
return subset
else:
raise KeyError("Unknown subset '%s', available subsets: %s" % \
(name, set(self._subsets)))
def transform(self, method, *args, **kwargs):
return method(self, *args, **kwargs)
def select(self, pred):
class _DatasetFilter(_ExtractorBase):
def __iter__(_):
return filter(pred, iter(self))
def categories(_):
return self.categories()
return _DatasetFilter()
def categories(self):
return {}
def get(self, id, subset=None):
subset = subset or DEFAULT_SUBSET_NAME
for item in self:
if item.id == id and item.subset == subset:
return item
return None
T = TypeVar('T')
class _ImportFail(DatumaroError):
pass
class ImportErrorPolicy:
def report_item_error(self, error: Exception, *,
item_id: Tuple[str, str]) -> None:
"""
Allows to report a problem with a dataset item.
If this function returns, the extractor must skip the item.
"""
if not isinstance(error, _ImportFail):
ie = ItemImportError(item_id)
ie.__cause__ = error
return self._handle_item_error(ie)
else:
raise error
def report_annotation_error(self, error: Exception, *,
item_id: Tuple[str, str]) -> None:
"""
Allows to report a problem with a dataset item annotation.
If this function returns, the extractor must skip the annotation.
"""
if not isinstance(error, _ImportFail):
ie = AnnotationImportError(item_id)
ie.__cause__ = error
return self._handle_annotation_error(ie)
else:
raise error
def _handle_item_error(self, error: ItemImportError) -> None:
"""This function must either call fail() or return."""
self.fail(error)
def _handle_annotation_error(self, error: AnnotationImportError) -> None:
"""This function must either call fail() or return."""
self.fail(error)
def fail(self, error: Exception) -> NoReturn:
raise _ImportFail from error
class FailingImportErrorPolicy(ImportErrorPolicy):
pass
@define(eq=False)
class ImportContext:
progress_reporter: ProgressReporter = field(default=None,
converter=attr.converters.default_if_none(factory=NullProgressReporter))
error_policy: ImportErrorPolicy = field(default=None,
converter=attr.converters.default_if_none(factory=FailingImportErrorPolicy))
class NullImportContext(ImportContext):
pass
class Extractor(_ExtractorBase, CliPlugin):
"""
A base class for user-defined and built-in extractors.
Should be used in cases, where SourceExtractor is not enough,
or its use makes problems with performance, implementation etc.
"""
def __init__(self, *,
length: Optional[int] = None,
subsets: Optional[Sequence[str]] = None,
ctx: Optional[ImportContext] = None):
super().__init__(length=length, subsets=subsets)
self._ctx: ImportContext = ctx or NullImportContext()
class SourceExtractor(Extractor):
"""
A base class for simple, single-subset extractors.
Should be used by default for user-defined extractors.
"""
def __init__(self, *,
length: Optional[int] = None,
subset: Optional[str] = None,
ctx: Optional[ImportContext] = None):
self._subset = subset or DEFAULT_SUBSET_NAME
super().__init__(length=length, subsets=[self._subset], ctx=ctx)
self._categories = {}
self._items = []
def categories(self):
return self._categories
def __iter__(self):
yield from self._items
def __len__(self):
return len(self._items)
def get(self, id, subset=None):
assert subset == self._subset, '%s != %s' % (subset, self._subset)
return super().get(id, subset or self._subset)
class Importer(CliPlugin):
@classmethod
def detect(
cls, context: FormatDetectionContext,
) -> Optional[FormatDetectionConfidence]:
if not cls.find_sources_with_params(context.root_path):
context.fail("specific requirement information unavailable")
return FormatDetectionConfidence.LOW
@classmethod
def find_sources(cls, path) -> List[Dict]:
raise NotImplementedError()
@classmethod
def find_sources_with_params(cls, path, **extra_params) -> List[Dict]:
return cls.find_sources(path)
def __call__(self, path, **extra_params):
if not path or not osp.exists(path):
raise DatasetNotFoundError(path)
found_sources = self.find_sources_with_params(
osp.normpath(path), **extra_params)
if not found_sources:
raise DatasetNotFoundError(path)
sources = []
for desc in found_sources:
params = dict(extra_params)
params.update(desc.get('options', {}))
desc['options'] = params
sources.append(desc)
return sources
@classmethod
def _find_sources_recursive(cls, path: str, ext: Optional[str],
extractor_name: str, filename: str = '*', dirname: str = '',
file_filter: Optional[Callable[[str], bool]] = None,
max_depth: int = 3):
"""
Finds sources in the specified location, using the matching pattern
to filter file names and directories.
Supposed to be used, and to be the only call in subclasses.
Parameters:
path: a directory or file path, where sources need to be found.
ext: file extension to match. To match directories,
set this parameter to None or ''. Comparison is case-independent,
a starting dot is not required.
extractor_name: the name of the associated Extractor type
filename: a glob pattern for file names
dirname: a glob pattern for filename prefixes
file_filter: a callable (abspath: str) -> bool, to filter paths found
max_depth: the maximum depth for recursive search.
Returns: a list of source configurations
(i.e. Extractor type names and c-tor parameters)
"""
if ext:
if not ext.startswith('.'):
ext = '.' + ext
ext = ext.lower()
if (path.lower().endswith(ext) and osp.isfile(path)) or \
(not ext and dirname and osp.isdir(path) and \
os.sep + osp.normpath(dirname.lower()) + os.sep in \
osp.abspath(path.lower()) + os.sep):
sources = [{'url': path, 'format': extractor_name}]
else:
sources = []
for d in range(max_depth + 1):
sources.extend({'url': p, 'format': extractor_name} for p in
iglob(osp.join(path, *('*' * d), dirname, filename + ext))
if (callable(file_filter) and file_filter(p)) \
or (not callable(file_filter)))
if sources:
break
return sources
class Transform(_ExtractorBase, CliPlugin):
"""
A base class for dataset transformations that change dataset items
or their annotations.
"""
@staticmethod
def wrap_item(item, **kwargs):
return item.wrap(**kwargs)
def __init__(self, extractor: IExtractor):
super().__init__()
self._extractor = extractor
def categories(self):
return self._extractor.categories()
def subsets(self):
if self._subsets is None:
self._subsets = set(self._extractor.subsets())
return super().subsets()
def __len__(self):
assert self._length in {None, 'parent'} or isinstance(self._length, int)
if self._length is None and \
not is_method_redefined('__iter__', Transform, self) \
or self._length == 'parent':
self._length = len(self._extractor)
return super().__len__()
class ItemTransform(Transform):
def transform_item(self, item: DatasetItem) -> Optional[DatasetItem]:
"""
Returns a modified copy of the input item.
Avoid changing and returning the input item, because it can lead to
unexpected problems. Use wrap_item() or item.wrap() to simplify copying.
"""
raise NotImplementedError()
def __iter__(self):
for item in self._extractor:
item = self.transform_item(item)
if item is not None:
yield item
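
# Editorial sketch, not part of the original module above: minimal shape of a
# user-defined extractor built on SourceExtractor, which populates self._items (and,
# if needed, self._categories) in the constructor. The item content is a made-up
# placeholder.
class _SingleItemExtractor(SourceExtractor):
    def __init__(self, subset=None):
        super().__init__(subset=subset)
        self._items = [DatasetItem(id='example_0', subset=self._subset)]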

the-stack_0_18218
from pandac.PandaModules import *
from direct.distributed import DistributedObject
from direct.interval.ProjectileInterval import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.racing.DistributedVehicle import DistributedVehicle
from DroppedGag import *
class DistributedGag(DistributedObject.DistributedObject):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.nodePath = None
self.billboard = False
self.scale = 1
self.shadow = True
self.dropShadow = None
self.type = 0
return
def delete(self):
DistributedObject.DistributedObject.delete(self)
self.nodePath.delete()
self.ignoreAll()
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
if not self.nodePath:
self.makeNodePath()
self.delta = -globalClockDelta.networkToLocalTime(self.initTime, globalClock.getFrameTime(), 16, 100) + globalClock.getFrameTime()
if self.type == 0:
self.name = self.uniqueName('banana')
elif self.type == 1:
self.name = self.uniqueName('pie')
self.nodePath.reparentTo(self.race.geom)
if self.ownerId == localAvatar.doId:
base.race.thrownGags[0].remove()
base.race.thrownGags = base.race.thrownGags[1:]
self.nodePath.setPos(self.pos[0], self.pos[1], self.pos[2])
else:
startPos = base.cr.doId2do[self.ownerId].getPos(render)
endPos = Vec3(self.pos[0], self.pos[1], self.pos[2])
throwIt = ProjectileInterval(self.nodePath, startPos=startPos, endPos=endPos, duration=1)
throwIt.start()
taskMgr.doMethodLater(0.8 - self.delta, self.addCollider, self.uniqueName('addCollider'))
def addCollider(self, t):
bs = CollisionSphere(0, 0, 0, 2)
bn = CollisionNode(self.name)
self.bnp = NodePath(bn)
self.bnp.reparentTo(self.nodePath)
self.bnp.node().addSolid(bs)
self.bnp.node().setIntoCollideMask(BitMask32(32768))
self.bnp.node().setFromCollideMask(BitMask32(32768))
self.accept('imIn-' + self.name, self.b_imHit)
def b_imHit(self, cevent):
self.ignoreAll()
self.sendUpdate('hitSomebody', [localAvatar.doId, globalClockDelta.getFrameNetworkTime(16, 100)])
if self.type == 0:
base.race.localKart.hitBanana()
elif self.type == 1:
base.race.localKart.hitPie()
self.nodePath.hide()
if hasattr(self, 'bnp'):
self.bnp.remove()
def hitSomebody(self, avId, timeStamp):
if localAvatar.doId != avId:
kart = DistributedVehicle.getKartFromAvId(avId)
if kart:
self.nodePath.hide()
if hasattr(self, 'bnp'):
self.bnp.remove()
kart.playSpin(timeStamp)
def setActivateTime(self, actTime):
self.activateTime = actTime
def setInitTime(self, initTime):
self.initTime = initTime
def setRace(self, doId):
self.race = base.cr.doId2do.get(doId)
def setPos(self, x, y, z):
self.pos = (x, y, z)
def makeNodePath(self):
if self.type == 0:
self.nodePath = DroppedGag(self.uniqueName('gag'), base.race.banana)
if self.billboard:
self.nodePath.setBillboardPointEye()
self.nodePath.setScale(0.9 * self.scale)
if self.type == 1:
self.nodePath = DroppedGag(self.uniqueName('gag'), base.race.banana)
if self.billboard:
self.nodePath.setBillboardPointEye()
self.nodePath.setScale(4.0 * self.scale)
def setOwnerId(self, ownerId):
self.ownerId = ownerId
def setType(self, type):
self.type = type
|
the-stack_0_18219 | """
tests.utils.test_ports
~~~~~~~~~~~~~~~~~~~~~~
Test the port related utilities
"""
import functools
import time
from unittest import mock
import pytest
import saltfactories.utils.ports as ports_utils
class MockedCreateSocket:
"""
This class just mocks the `socket.socket(...)` call so that we return
the ports we want
"""
def __init__(self, ports):
self.ports = list(ports) + list(ports)
def __call__(self, *args, **kwargs):
port = self.ports.pop(0)
# Return a MockedSocket instance
return MockedSocket(port)
class MockedSocket:
"""
This class is used so that we can return the known port in the getsockname call
"""
def __init__(self, port):
self.port = port
def bind(self, *args, **kwargs):
pass
def getsockname(self):
return None, self.port
def close(self):
pass
def test_get_unused_localhost_port_unique():
"""
Tests that test_get_unused_localhost_port only returns unique ports on consecutive calls
"""
num_calls = 10
start_port = 1000
# The ports we're gonna get back
ports = []
for port in range(start_port, start_port + num_calls):
for _ in range(num_calls):
# We make sure each port is repeated consecutively
ports.append(port)
# Hold a reference to the list of unique ports
unique = set(ports)
# This list will hold all ports that the function returns
got_ports = []
# We'll get the unique ports
with mock.patch(
"socket.socket", new_callable=functools.partial(MockedCreateSocket, ports)
) as mocked_socket:
for _ in range(num_calls):
got_ports.append(ports_utils.get_unused_localhost_port(cached_seconds=1))
assert len(got_ports) == num_calls
assert set(got_ports) == unique
# Let's get ports again. Since not enough time has passed, we won't get any ports
with mock.patch(
"socket.socket", new_callable=functools.partial(MockedCreateSocket, ports + ports)
) as mocked_socket:
for _ in range(num_calls):
with pytest.raises(IndexError):
# we won't have enough ports
got_ports.append(ports_utils.get_unused_localhost_port(cached_seconds=1))
# Since we couldn't get repeated ports, got_ports remains as it was
assert len(got_ports) == num_calls
assert set(got_ports) == unique
# Now, if we sleep one second, the cached ports will be gone and we'll get repeated ports
time.sleep(1)
with mock.patch(
"socket.socket", new_callable=functools.partial(MockedCreateSocket, ports)
) as mocked_socket:
for _ in range(num_calls):
got_ports.append(ports_utils.get_unused_localhost_port(cached_seconds=1))
assert len(got_ports) == 2 * len(unique)
assert set(got_ports) == unique
|
the-stack_0_18220 | import os
import yaml
from easydict import EasyDict as edict
class YamlParser(edict):
"""
This is yaml parser based on EasyDict.
"""
def __init__(self, cfg_dict=None, config_file=None):
if cfg_dict is None:
cfg_dict = {}
if config_file is not None:
assert(os.path.isfile(config_file))
with open(config_file, 'r') as fo:
                cfg_dict.update(yaml.load(fo.read(), Loader=yaml.FullLoader))
super(YamlParser, self).__init__(cfg_dict)
def merge_from_file(self, config_file):
with open(config_file, 'r') as fo:
            self.update(yaml.load(fo.read(), Loader=yaml.FullLoader))
def merge_from_dict(self, config_dict):
self.update(config_dict)
def get_config(config_file=None):
return YamlParser(config_file=config_file)
if __name__ == "__main__":
cfg = YamlParser(config_file="../configs/rcnn.yaml")
cfg.merge_from_file("../configs/deep_sort.yaml")
import ipdb; ipdb.set_trace() |
the-stack_0_18221 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import date
class MockCall(dict):
def __init__(self, phone_number, call_type, date=date.today(), duration=1, serviceId=0, **kwargs):
self['number'] = phone_number
self['type'] = call_type
self['date'] = '{:%m/%d/%Y}'.format(date)
self['duration'] = duration
self['serviceId'] = serviceId
# update with any keyword arguments passed
self.update(**kwargs)
# allow getting items as if they were attributes
def __getattr__(self, attr):
return self[attr]
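# Illustrative usage (hypothetical values): attribute access mirrors dict access.
#   call = MockCall('5551234567', 'incoming', duration=30)
#   assert call.number == call['number'] == '5551234567'
# Note: the `date=date.today()` default is evaluated once, when this module is
# imported, so long-running sessions keep that import-time date.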
|
the-stack_0_18222 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from nova import exception
from nova import objects
from nova.objects import cell_mapping
from nova.tests.unit.objects import test_objects
def get_db_mapping(**updates):
db_mapping = {
'id': 1,
'uuid': uuidutils.generate_uuid(),
'name': 'cell1',
'transport_url': 'rabbit://',
'database_connection': 'sqlite:///',
'created_at': None,
'updated_at': None,
}
db_mapping.update(updates)
return db_mapping
class _TestCellMappingObject(object):
@mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db')
def test_get_by_uuid(self, uuid_from_db):
db_mapping = get_db_mapping()
uuid_from_db.return_value = db_mapping
mapping_obj = objects.CellMapping().get_by_uuid(self.context,
db_mapping['uuid'])
uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid'])
self.compare_obj(mapping_obj, db_mapping)
@mock.patch.object(cell_mapping.CellMapping, '_get_by_uuid_from_db',
side_effect=exception.CellMappingNotFound(uuid='fake'))
def test_get_by_uuid_invalid(self, uuid_from_db):
db_mapping = get_db_mapping()
self.assertRaises(exception.CellMappingNotFound,
objects.CellMapping().get_by_uuid,
self.context,
db_mapping['uuid'])
uuid_from_db.assert_called_once_with(self.context, db_mapping['uuid'])
@mock.patch.object(cell_mapping.CellMapping, '_create_in_db')
def test_create(self, create_in_db):
uuid = uuidutils.generate_uuid()
db_mapping = get_db_mapping(uuid=uuid, name='test',
database_connection='mysql+pymysql:///')
create_in_db.return_value = db_mapping
mapping_obj = objects.CellMapping(self.context)
mapping_obj.uuid = uuid
mapping_obj.name = 'test'
mapping_obj.database_connection = 'mysql+pymysql:///'
mapping_obj.create()
create_in_db.assert_called_once_with(self.context,
{'uuid': uuid,
'name': 'test',
'database_connection': 'mysql+pymysql:///'})
self.compare_obj(mapping_obj, db_mapping)
@mock.patch.object(cell_mapping.CellMapping, '_save_in_db')
def test_save(self, save_in_db):
uuid = uuidutils.generate_uuid()
db_mapping = get_db_mapping(database_connection='mysql+pymysql:///')
save_in_db.return_value = db_mapping
mapping_obj = objects.CellMapping(self.context)
mapping_obj.uuid = uuid
mapping_obj.database_connection = 'mysql+pymysql:///'
mapping_obj.save()
save_in_db.assert_called_once_with(self.context, uuid,
{'uuid': uuid,
'database_connection': 'mysql+pymysql:///'})
self.compare_obj(mapping_obj, db_mapping)
@mock.patch.object(cell_mapping.CellMapping, '_destroy_in_db')
def test_destroy(self, destroy_in_db):
uuid = uuidutils.generate_uuid()
mapping_obj = objects.CellMapping(self.context)
mapping_obj.uuid = uuid
mapping_obj.destroy()
destroy_in_db.assert_called_once_with(self.context, uuid)
class TestCellMappingObject(test_objects._LocalTest,
_TestCellMappingObject):
pass
class TestRemoteCellMappingObject(test_objects._RemoteTest,
_TestCellMappingObject):
pass
|
the-stack_0_18224 | """ Simple ping-pong command. """
from timbot.commands import BotCommand, Context, CommandHelp
class PingCommand(BotCommand):
"""
Example implementation of a command that responds Pong! when it is invoked.
Use this as a template!
"""
def __init__(self):
name = 'ping'
command_help = CommandHelp()
super().__init__(name, command_help)
async def _command_function(self, config, ctx):
await ctx.channel.send('Pong!')
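# Illustrative sketch only (assumes the same BotCommand/CommandHelp/Context API
# used above): another trivial command built from this template.
class EchoCommand(BotCommand):
    """
    Hypothetical example of filling in the template with a different name and response.
    """
    def __init__(self):
        name = 'echo'
        command_help = CommandHelp()
        super().__init__(name, command_help)
    async def _command_function(self, config, ctx):
        await ctx.channel.send('Echo!')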
|
the-stack_0_18226 | # Copyright (C) 2020 KeselekPermen69
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from userbot import bot, CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern=r"^\.nhentai(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
link = event.pattern_match.group(1)
chat = "@nHentaiBot"
await event.edit("```Processing```")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(
events.NewMessage(
incoming=True,
from_users=424466890))
await bot.send_message(chat, link)
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock @nHentaiBot and try again```")
return
if response.text.startswith("**Sorry I couldn't get manga from**"):
await event.edit("```I think this is not the right link```")
else:
await event.delete()
await bot.send_message(event.chat_id, response.message)
CMD_HELP.update({
"nhentai":
"`.nhentai` <link / code> \
\nUsage: view nhentai in telegra.ph XD\n"})
|
the-stack_0_18227 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
modules using a variety of metrics and summarizing the results.
****************************************
* Evaluating a Checkpointed Model Once *
****************************************
Once we've trained a model, we'll want to evaluate it. The simplest way to do
this is to evaluate the performance of a saved model a single time. In order
to do this, we can specify a number of metrics we'll want to evaluate as well
as specify the summaries we want to save to disk. Furthermore, we can print
out the metrics values to stdout:
# Specify where the checkpoint is stored:
checkpoint_path = ...
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.contrib.metrics.streaming_accuracy(predictions, labels),
"mse": tf.contrib.metrics.streaming_mean_squared_error(
predictions, labels),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
names_to_values = evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(logdir),
],
config=None)
for name in names_to_values:
print('Metric %s has value %f.' % (name, names_to_values[name]))
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluate_repeatedly method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.contrib.metrics.streaming_accuracy(predictions, labels),
"mse": tf.contrib.metrics.streaming_mean_squared_error(
predictions, labels),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
eval_ops=names_to_updates.values(),
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(logdir),
],
eval_interval_secs=600)
*******************************************************
* Evaluating a Checkpointed Model with Summaries Only *
*******************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_ops' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.summary.scalar(...)
tf.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
hooks=[
tf.contrib.training.SummaryAtEndHook(logdir),
],
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
__all__ = [
'StopAfterNEvalsHook',
'SummaryAtEndHook',
'checkpoints_iterator',
'evaluate_once',
'evaluate_repeatedly',
'get_or_create_eval_step',
'wait_for_new_checkpoint',
]
# pylint: disable=protected-access
# pylint: disable=invalid-name
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
# pylint: enable=invalid-name
# pylint: enable=protected-access
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum amount of time to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info('Found new checkpoint at %s', checkpoint_path)
return checkpoint_path
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum amount of time to wait between checkpoints. If left as
`None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info('Timed-out waiting for a checkpoint.')
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
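# Illustrative sketch (the directory and marker-file convention below are
# hypothetical): a `timeout_fn` that ends the iteration once training has
# signalled completion, as described in the docstring above.
#
#   def _training_finished():
#     return os.path.exists('/tmp/my_model_dir/TRAINING_DONE')
#
#   for checkpoint_path in checkpoints_iterator('/tmp/my_model_dir/', timeout=600,
#                                               timeout_fn=_training_finished):
#     ...  # run one evaluation pass against checkpoint_path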
class SummaryAtEndHook(session_run_hook.SessionRunHook):
"""A run hook that saves a summary with the results of evaluation."""
def __init__(self,
log_dir=None,
summary_writer=None,
summary_op=None,
feed_dict=None):
"""Constructs the Summary Hook.
Args:
log_dir: The directory where the summary events are saved to. Used only
when `summary_writer` is not specified.
summary_writer: A `tf.summary.FileWriter` to write summary events with.
summary_op: The summary op to run. If left as `None`, then all summaries
in the tf.GraphKeys.SUMMARIES collection are used.
feed_dict: An optional feed dictionary to use when evaluating the
summaries.
Raises:
ValueError: If both `log_dir` and `summary_writer` are `None`.
"""
self._summary_op = summary_op
self._replace_summary_op = summary_op is None
self._feed_dict = feed_dict
self._summary_writer = summary_writer
self._log_dir = log_dir
if self._log_dir is None and self._summary_writer is None:
raise ValueError('One of log_dir or summary_writer should be used.')
def begin(self):
if self._replace_summary_op:
self._summary_op = summary.merge_all()
self._global_step = variables.get_or_create_global_step()
def after_create_session(self, session, coord):
if self._summary_writer is None and self._log_dir:
self._summary_writer = summary.FileWriterCache.get(self._log_dir)
def end(self, session):
global_step = training_util.global_step(session, self._global_step)
summary_str = session.run(self._summary_op, self._feed_dict)
if self._summary_writer:
self._summary_writer.add_summary(summary_str, global_step)
self._summary_writer.flush()
def _scaffold_with_init(scaffold, saver, checkpoint_path):
"""Creates a scaffold that loads the given checkpoint using an init_fn.
Args:
scaffold: The scaffold to copy.
saver: The saver to use when restoring the checkpoint.
checkpoint_path: An absolute path to a checkpoint.
Returns:
A scaffold with an init_fn that loads the given checkpoint. If the scaffold
provided already has an init_fn, the scaffold is returned unchanged.
"""
def restore_checkpoint(_, session):
saver.restore(session, checkpoint_path)
if not scaffold.init_fn:
scaffold = monitored_session.Scaffold(
init_op=scaffold.init_op,
init_feed_dict=scaffold.init_feed_dict,
init_fn=restore_checkpoint,
ready_op=scaffold.ready_op,
local_init_op=scaffold.local_init_op,
summary_op=scaffold.summary_op,
saver=scaffold.saver)
return scaffold
def evaluate_repeatedly(checkpoint_dir,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
eval_interval_secs=60,
hooks=None,
config=None,
max_number_of_evaluations=None,
timeout=None,
timeout_fn=None):
"""Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.
During a single evaluation, the `eval_ops` is run until the session is
interrupted or requested to finish. This is typically requested via a
`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
the requested number of times.
Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
evaluated a single time after `eval_ops` has finished running and the fetched
values of `final_ops` are returned. If `final_ops` is left as `None`, then
`None` is returned.
One may also consider using a `tf.contrib.training.SummaryAtEndHook` to record
summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
summaries run immediately after the model checkpoint has been restored.
Note that `evaluate_once` creates a local variable used to track the number of
evaluations run via `tf.contrib.training.get_or_create_eval_step`.
Consequently, if a custom local init op is provided via a `scaffold`, the
caller should ensure that the local init op also initializes the eval step.
Args:
checkpoint_dir: The directory where checkpoints are stored.
master: The address of the TensorFlow master.
scaffold: An tf.train.Scaffold instance for initializing variables and
restoring variables. Note that `scaffold.init_fn` is used by the function
to restore the checkpoint. If you supply a custom init_fn, then it must
also take care of restoring the model from its checkpoint.
eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`, which is run until the session is requested to stop,
commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
feed_dict: The feed dictionary to use when executing the `eval_ops`.
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
eval_interval_secs: The minimum number of seconds between evaluations.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
evaluation loop.
config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
max_number_of_evaluations: The maximum times to run the evaluation. If left
as `None`, then evaluation runs indefinitely.
timeout: The maximum amount of time to wait between checkpoints. If left as
`None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Returns:
The fetched values of `final_ops` or `None` if `final_ops` is `None`.
"""
eval_step = get_or_create_eval_step()
# Prepare the run hooks.
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
for h in hooks:
if isinstance(h, StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step) # pylint: disable=protected-access
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
final_ops_feed_dict)
hooks.append(final_ops_hook)
num_evaluations = 0
for checkpoint_path in checkpoints_iterator(
checkpoint_dir,
min_interval_secs=eval_interval_secs,
timeout=timeout,
timeout_fn=timeout_fn):
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
num_evaluations += 1
if (max_number_of_evaluations is not None and
num_evaluations >= max_number_of_evaluations):
return final_ops_hook.final_ops_values
return final_ops_hook.final_ops_values
|
the-stack_0_18231 | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('config_file', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(config_file, output_filepath):
""" Runs feature creation
"""
logger = logging.getLogger(__name__)
#logger.info(f'making final data set from {step} data')
####
    logger.info('step succeeded')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
#project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
#load_dotenv(find_dotenv())
main()
|
the-stack_0_18232 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from tests import FunctionalTest
from st2common.constants.api import CACHE_CONTROL_HEADER
class TestBase(FunctionalTest):
def test_defaults(self):
response = self.app.get('/')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://127.0.0.1:3000')
self.assertEqual(response.headers['Access-Control-Allow-Methods'],
'GET,POST,PUT,DELETE,OPTIONS')
self.assertEqual(response.headers['Access-Control-Allow-Headers'],
'Content-Type,Authorization,X-Auth-Token,St2-Api-Key,X-Request-ID')
self.assertEqual(response.headers['Access-Control-Expose-Headers'],
'Content-Type,X-Limit,X-Total-Count,X-Request-ID')
def test_origin(self):
response = self.app.get('/', headers={
'origin': 'http://127.0.0.1:3000'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://127.0.0.1:3000')
def test_additional_origin(self):
response = self.app.get('/', headers={
'origin': 'http://dev'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://dev')
def test_wrong_origin(self):
# Invalid origin (not specified in the config), we return first allowed origin specified
# in the config
response = self.app.get('/', headers={
'origin': 'http://xss'
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers.get('Access-Control-Allow-Origin'),
'http://127.0.0.1:3000')
invalid_origins = [
'http://',
'https://',
'https://www.example.com',
'null',
'*'
]
for origin in invalid_origins:
response = self.app.get('/', headers={
'origin': origin
})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers.get('Access-Control-Allow-Origin'),
'http://127.0.0.1:3000')
def test_wildcard_origin(self):
try:
cfg.CONF.set_override('allow_origin', ['*'], 'api')
response = self.app.get('/', headers={
'origin': 'http://xss'
})
finally:
cfg.CONF.clear_override('allow_origin', 'api')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.headers['Access-Control-Allow-Origin'],
'http://xss')
def test_valid_status_code_is_returned_on_invalid_path(self):
# TypeError: get_all() takes exactly 1 argument (2 given)
resp = self.app.get('/v1/executions/577f775b0640fd1451f2030b/re_run', expect_errors=True)
self.assertEqual(resp.status_int, 404)
# get_one() takes exactly 2 arguments (4 given)
resp = self.app.get('/v1/executions/577f775b0640fd1451f2030b/re_run/a/b',
expect_errors=True)
self.assertEqual(resp.status_int, 404)
def test_cache_control_present(self):
resp = self.app.options('/v1/executions/')
self.assertEqual(resp.status_int, 200)
self.assertIsInstance(CACHE_CONTROL_HEADER, str)
self.assertEqual(resp.headers['Cache-Control'],
CACHE_CONTROL_HEADER)
|
the-stack_0_18234 | """rename name column to scope in objects table
Revision ID: d32ee62c4759
Revises: 3d748babd72d
Create Date: 2021-07-28 21:46:07.241456
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'd32ee62c4759'
down_revision = '3d748babd72d'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column( # type: ignore
'objects',
'name',
new_column_name='scope',
)
op.drop_constraint( # type: ignore
'fk__objects__name_repo__scopes',
'objects',
)
op.create_foreign_key( # type: ignore
'fk__objects__scope_repo__scopes',
source_table='objects',
referent_table='scopes',
local_cols=['scope', 'repo'],
remote_cols=['name', 'repo'],
ondelete='CASCADE',
onupdate='CASCADE',
)
def downgrade():
op.alter_column( # type: ignore
'objects',
'scope',
new_column_name='name',
)
op.drop_constraint( # type: ignore
'fk__objects__scope_repo__scopes',
'objects',
)
op.create_foreign_key( # type: ignore
'fk__objects__name_repo__scopes',
source_table='objects',
referent_table='scopes',
local_cols=['name', 'repo'],
remote_cols=['name', 'repo'],
ondelete='CASCADE',
onupdate='CASCADE',
)
|
the-stack_0_18235 | import copy
import logging
from multiprocessing.pool import ThreadPool
import maven_repo_util
from maven_artifact import MavenArtifact
class Filter:
def __init__(self, config):
self.config = config
def filter(self, artifactList, threadnum):
"""
Filter artifactList removing excluded GAVs, duplicates and GAVs that exists in
excluded repositories.
:param artifactList: artifactList from ArtifactListBuilder.
:returns: filtered artifactList.
"""
if self.config.excludedGAVs:
artifactList = self._filterExcludedGAVs(artifactList)
if self.config.excludedTypes:
artifactList = self._filterExcludedTypes(artifactList)
artifactList = self._filterDuplicates(artifactList)
if self.config.singleVersion:
artifactList = self._filterMultipleVersions(artifactList)
if self.config.excludedRepositories:
artifactList = self._filterExcludedRepositories(artifactList, threadnum)
return artifactList
def _filterExcludedGAVs(self, artifactList):
"""
Filter artifactList removing specified GAVs.
:param artifactList: artifactList to be filtered.
:returns: artifactList without artifacts that matched specified GAVs.
"""
logging.debug("Filtering artifacts with excluded GAVs.")
regExps = maven_repo_util.getRegExpsFromStrings(self.config.excludedGAVs)
gavRegExps = []
gatcvRegExps = []
for regExp in regExps:
if regExp.pattern.count(":") > 2:
gatcvRegExps.append(regExp)
else:
gavRegExps.append(regExp)
for ga in artifactList.keys():
for priority in artifactList[ga].keys():
for version in artifactList[ga][priority].keys():
gav = "%s:%s" % (ga, version)
if maven_repo_util.somethingMatch(gavRegExps, gav):
logging.debug("Dropping GAV %s:%s from priority %i because it matches an excluded "
"GAV pattern.", ga, version, priority)
del artifactList[ga][priority][version]
else:
artSpec = artifactList[ga][priority][version]
for artType in copy.deepcopy(artSpec.artTypes.keys()):
at = artSpec.artTypes[artType]
for classifier in copy.deepcopy(at.classifiers):
if classifier:
gatcv = "%s:%s:%s:%s" % (ga, artType, classifier, version)
else:
gatcv = "%s:%s:%s" % (ga, artType, version)
if maven_repo_util.somethingMatch(gatcvRegExps, gatcv):
logging.debug("Dropping GATCV %s from priority %i because it matches an excluded "
"GAV pattern.", gatcv, priority)
at.classifiers.remove(classifier)
if not at.classifiers:
logging.debug("Dropping GATV %s:%s:%s from priority %i because of no classifiers left.",
ga, artType, version, priority)
del artSpec.artTypes[artType]
if not artSpec.containsMain():
logging.debug("Dropping GAV %s:%s from priority %i because of no main artifact left.",
ga, version, priority)
del artifactList[ga][priority][version]
if not artifactList[ga][priority]:
logging.debug("Dropping GA %s from priority %i because of no version left.", ga, priority)
del artifactList[ga][priority]
if not artifactList[ga]:
logging.debug("Dropping GA %s because of no priority left.", ga)
del artifactList[ga]
return artifactList
def _filterExcludedTypes(self, artifactList):
'''
Filter artifactList removing GAVs with specified main types only, otherwise keeping GAVs with
not-excluded artifact types only.
:param artifactList: artifactList to be filtered.
:param exclTypes: list of excluded types
:returns: artifactList without artifacts that matched specified types and had no other main types.
'''
logging.debug("Filtering artifacts with excluded types.")
regExps = maven_repo_util.getRegExpsFromStrings(self.config.gatcvWhitelist)
exclTypes = self.config.excludedTypes
for ga in artifactList.keys():
for priority in artifactList[ga].keys():
for version in artifactList[ga][priority].keys():
artSpec = artifactList[ga][priority][version]
for artType in list(artSpec.artTypes.keys()):
if artType in exclTypes:
artTypeObj = artSpec.artTypes[artType]
classifiers = artTypeObj.classifiers
(groupId, artifactId) = ga.split(':')
for classifier in list(classifiers):
art = MavenArtifact(groupId, artifactId, artType, version, classifier)
gatcv = art.getGATCV()
if not maven_repo_util.somethingMatch(regExps, gatcv):
logging.debug("Dropping classifier \"%s\" of %s:%s:%s from priority %i because of "
"excluded type.", classifier, ga, artType, version, priority)
classifiers.remove(classifier)
else:
logging.debug("Skipping drop of %s:%s:%s:%s from priority %i because it matches a "
"whitelist pattern.", ga, artType, classifier, version, priority)
if not classifiers:
logging.debug("Dropping %s:%s:%s from priority %i because of no classifier left.", ga,
artType, version, priority)
del(artSpec.artTypes[artType])
noMain = True
for artType in artSpec.artTypes.keys():
artTypeObj = artSpec.artTypes[artType]
if artTypeObj.mainType:
noMain = False
break
if not artSpec.artTypes or noMain:
if noMain:
logging.debug("Dropping GAV %s:%s from priority %i because of no main artifact left.",
ga, version, priority)
else:
logging.debug("Dropping GAV %s:%s from priority %i because of no artifact type left.",
ga, version, priority)
del artifactList[ga][priority][version]
if not artifactList[ga][priority]:
logging.debug("Dropping GA %s from priority %i because of no version left.", ga, priority)
del artifactList[ga][priority]
if not artifactList[ga]:
logging.debug("Dropping GA %s because of no priority left.", ga)
del artifactList[ga]
return artifactList
def _filterExcludedRepositories(self, artifactList, threadnum):
"""
Filter artifactList removing artifacts existing in specified repositories.
:param artifactList: artifactList to be filtered.
:returns: artifactList without artifacts that exists in specified repositories.
"""
logging.debug("Filtering artifacts contained in excluded repositories.")
pool = ThreadPool(threadnum)
# Contains artifact to be removed
delArtifacts = []
for ga in artifactList.keys():
groupId = ga.split(':')[0]
artifactId = ga.split(':')[1]
for priority in artifactList[ga].keys():
for version in artifactList[ga][priority].keys():
artifact = MavenArtifact(groupId, artifactId, "pom", version)
pool.apply_async(
_artifactInRepos,
[self.config.excludedRepositories, artifact, priority, delArtifacts]
)
        # Close the pool and wait for the workers to finish
pool.close()
pool.join()
for artifact, priority in delArtifacts:
ga = artifact.getGA()
logging.debug("Dropping GAV %s:%s from priority %i because it was found in an excluded repository.",
ga, artifact.version, priority)
del artifactList[ga][priority][artifact.version]
if not artifactList[ga][priority]:
logging.debug("Dropping GA %s from priority %i because of no version left.", ga, priority)
del artifactList[ga][priority]
if not artifactList[ga]:
logging.debug("Dropping GA %s because of no priority left.", ga)
del artifactList[ga]
return artifactList
def _filterDuplicates(self, artifactList):
"""
Filter artifactList removing duplicate artifacts.
:param artifactList: artifactList to be filtered.
:returns: artifactList without duplicate artifacts from lower priorities.
"""
logging.debug("Filtering duplicate artifacts.")
for ga in artifactList.keys():
for priority in sorted(artifactList[ga].keys()):
for version in artifactList[ga][priority].keys():
for pr in artifactList[ga].keys():
if pr <= priority:
continue
if version in artifactList[ga][pr]:
logging.debug("Dropping GAV %s:%s from priority %i because its duplicate was found in "
"priority %s.", ga, version, pr, priority)
if len(artifactList[ga][pr][version].paths):
artifactList[ga][priority][version].paths.extend(artifactList[ga][pr][version].paths)
del artifactList[ga][pr][version]
if not artifactList[ga][priority]:
logging.debug("Dropping GA %s from priority %i because of no version left.", ga, priority)
del artifactList[ga][priority]
if not artifactList[ga]:
logging.debug("Dropping GA %s because of no priority left.", ga)
del artifactList[ga]
return artifactList
def _filterMultipleVersions(self, artifactList):
logging.debug("Filtering multi-version artifacts to have just a single version.")
regExps = maven_repo_util.getRegExpsFromStrings(self.config.multiVersionGAs, False)
for ga in sorted(artifactList.keys()):
if maven_repo_util.somethingMatch(regExps, ga):
continue
# Gather all priorities
priorities = sorted(artifactList[ga].keys())
priority = priorities[0]
# Gather all versions
versions = list(artifactList[ga][priority].keys())
if len(versions) > 1: # list of 1 is sorted by definition
versions = maven_repo_util._sortVersionsWithAtlas(versions)
# Remove version, priorities and gats from artifactList as necessary
for version in versions[1:]:
logging.debug("Dropping GAV %s:%s from priority %i because only single version is allowed.", ga,
version, priority)
del artifactList[ga][priority][version]
for p in priorities[1:]:
logging.debug("Dropping GA %s from priority %i because of no version left.", ga, p)
del artifactList[ga][p]
if not artifactList[ga]:
logging.debug("Dropping GA %s because of no priority left.", ga)
del artifactList[ga]
return artifactList
def _artifactInRepos(repositories, artifact, priority, artifacts):
"""
Checks if artifact is available in one of the repositories, if so, appends
it with priority in list of pairs - artifacts. Used for multi-threading.
:param repositories: list of repository urls
:param artifact: searched MavenArtifact
:param priority: value of dictionary artifacts
:param artifacts: list with (artifact, priority) tuples
"""
for repoUrl in repositories:
if maven_repo_util.gavExists(repoUrl, artifact):
#Critical section?
artifacts.append((artifact, priority))
break
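# Illustrative usage sketch (the configuration object and values below are
# hypothetical; only the attribute names read by Filter above are assumed):
#   config.excludedGAVs = ["org.example:demo:1.*"]
#   config.excludedTypes = ["zip", "ear"]
#   config.singleVersion = True
#   config.excludedRepositories = ["http://repo.example.com/releases/"]
#   filtered = Filter(config).filter(artifact_list, threadnum=4)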
|
the-stack_0_18236 | import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import os
import numpy as np
import pandas as pd
import h5py
import cmapPy.pandasGEXpress.GCToo as GCToo
__author__ = "Oana Enache"
__email__ = "[email protected]"
# instantiate logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
version_node = "version"
rid_node = "/0/META/ROW/id"
cid_node = "/0/META/COL/id"
data_node = "/0/DATA/0/matrix"
row_meta_group_node = "/0/META/ROW"
col_meta_group_node = "/0/META/COL"
def parse(gctx_file_path, convert_neg_666=True, rid=None, cid=None,
ridx=None, cidx=None, row_meta_only=False, col_meta_only=False, make_multiindex=False):
"""
Primary method of script. Reads in path to a gctx file and parses into GCToo object.
Input:
Mandatory:
- gctx_file_path (str): full path to gctx file you want to parse.
Optional:
- convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not
(see Note below for more details on this). Default = False.
- rid (list of strings): list of row ids to specifically keep from gctx. Default=None.
- cid (list of strings): list of col ids to specifically keep from gctx. Default=None.
- ridx (list of integers): only read the rows corresponding to this
list of integer ids. Default=None.
- cidx (list of integers): only read the columns corresponding to this
list of integer ids. Default=None.
- row_meta_only (bool): Whether to load data + metadata (if False), or just row metadata (if True)
as pandas DataFrame
- col_meta_only (bool): Whether to load data + metadata (if False), or just col metadata (if True)
as pandas DataFrame
- make_multiindex (bool): whether to create a multi-index df combining
the 3 component dfs
Output:
- myGCToo (GCToo): A GCToo instance containing content of parsed gctx file. Note: if meta_only = True,
this will be a GCToo instance where the data_df is empty, i.e. data_df = pd.DataFrame(index=rids,
columns = cids)
Note: why does convert_neg_666 exist?
- In CMap--for somewhat obscure historical reasons--we use "-666" as our null value
for metadata. However (so that users can take full advantage of pandas' methods,
including those for filtering nan's etc) we provide the option of converting these
into numpy.NaN values, the pandas default.
"""
full_path = os.path.expanduser(gctx_file_path)
# Verify that the path exists
if not os.path.exists(full_path):
err_msg = "The given path to the gctx file cannot be found. full_path: {}"
logger.error(err_msg.format(full_path))
raise Exception(err_msg.format(full_path))
logger.info("Reading GCTX: {}".format(full_path))
# open file
gctx_file = h5py.File(full_path, "r")
if row_meta_only:
# read in row metadata
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, None)
gctx_file.close()
# subset if specified, then return
row_meta = row_meta.iloc[sorted_ridx]
return row_meta
elif col_meta_only:
# read in col metadata
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, None, col_meta)
gctx_file.close()
# subset if specified, then return
col_meta = col_meta.iloc[sorted_cidx]
return col_meta
else:
# read in row metadata
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
# read in col metadata
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
# validate optional input ids & get indexes to subset by
(sorted_ridx, sorted_cidx) = check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta, col_meta)
data_dset = gctx_file[data_node]
data_df = parse_data_df(data_dset, sorted_ridx, sorted_cidx, row_meta, col_meta)
# (if subsetting) subset metadata
row_meta = row_meta.iloc[sorted_ridx]
col_meta = col_meta.iloc[sorted_cidx]
# get version
my_version = gctx_file.attrs[version_node]
if type(my_version) == np.ndarray:
my_version = my_version[0]
gctx_file.close()
# make GCToo instance
my_gctoo = GCToo.GCToo(data_df=data_df, row_metadata_df=row_meta, col_metadata_df=col_meta,
src=full_path, version=my_version, make_multiindex=make_multiindex)
return my_gctoo
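# Illustrative usage sketch (file path and ids are hypothetical):
#   gctoo = parse("my_data.gctx", cid=["sample_1", "sample_2"])   # subset two columns
#   data_df = gctoo.data_df                                       # values as a pandas DataFrame
#   col_meta = parse("my_data.gctx", col_meta_only=True)          # column metadata only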
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
"""
Makes sure that (if entered) id inputs entered are of one type (string id or index)
Input:
- rid (list or None): if not None, a list of rids
- ridx (list or None): if not None, a list of indexes
- cid (list or None): if not None, a list of cids
- cidx (list or None): if not None, a list of indexes
Output:
- a tuple of the ordered ridx and cidx
"""
(row_type, row_ids) = check_id_idx_exclusivity(rid, ridx)
(col_type, col_ids) = check_id_idx_exclusivity(cid, cidx)
row_ids = check_and_convert_ids(row_type, row_ids, row_meta_df)
ordered_ridx = get_ordered_idx(row_type, row_ids, row_meta_df)
col_ids = check_and_convert_ids(col_type, col_ids, col_meta_df)
ordered_cidx = get_ordered_idx(col_type, col_ids, col_meta_df)
return (ordered_ridx, ordered_cidx)
def check_id_idx_exclusivity(id, idx):
"""
Makes sure user didn't provide both ids and idx values to subset by.
Input:
- id (list or None): if not None, a list of string id names
- idx (list or None): if not None, a list of integer id indexes
Output:
- a tuple: first element is subset type, second is subset content
"""
if (id is not None and idx is not None):
msg = ("'id' and 'idx' fields can't both not be None," +
" please specify subset in only one of these fields")
logger.error(msg)
raise Exception("parse_gctx.check_id_idx_exclusivity: " + msg)
elif id is not None:
return ("id", id)
elif idx is not None:
return ("idx", idx)
else:
return (None, [])
def check_and_convert_ids(id_type, id_list, meta_df):
if meta_df is not None:
if id_type == "id":
id_list = convert_ids_to_meta_type(id_list, meta_df)
check_id_validity(id_list, meta_df)
else:
check_idx_validity(id_list, meta_df)
return id_list
else:
return None
def check_id_validity(id_list, meta_df):
id_set = set(id_list)
meta_set = set(meta_df.index)
mismatch_ids = id_set - meta_set
if len(mismatch_ids) > 0:
msg = "some of the ids being used to subset the data are not present in the metadata for the file being parsed - mismatch_ids: {}".format(
mismatch_ids)
logger.error(msg)
raise Exception("parse_gctx check_id_validity " + msg)
def check_idx_validity(id_list, meta_df):
N = meta_df.shape[0]
out_of_range_ids = [my_id for my_id in id_list if my_id < 0 or my_id >= N]
if len(out_of_range_ids):
msg = "some of indexes being used to subset the data are not valid max N: {} out_of_range_ids: {}".format(N,
out_of_range_ids)
logger.error(msg)
raise Exception("parse_gctx check_idx_validity " + msg)
def convert_ids_to_meta_type(id_list, meta_df):
try:
return pd.Series(id_list).astype(meta_df.index.dtype).values
except ValueError as ve:
id_list_types = set([type(x) for x in id_list])
msg = "The type of the id_list (rid or cid) being used to subset the data is not compatible with the metadata id's in the file. Types found - meta_df.index.dtype: {} id_list_types: {}".format(
meta_df.index.dtype, id_list_types)
logger.error(msg)
raise Exception("parse_gctx check_if_ids_in_meta " + msg + " ValueError ve: {}".format(ve))
def get_ordered_idx(id_type, id_list, meta_df):
"""
Gets index values corresponding to ids to subset and orders them.
Input:
- id_type (str): either "id", "idx" or None
- id_list (list): either a list of indexes or id names
Output:
- a sorted list of indexes to subset a dimension by
"""
if meta_df is not None:
if id_type is None:
id_list = range(0, len(list(meta_df.index)))
elif id_type == "id":
lookup = {x: i for (i,x) in enumerate(meta_df.index)}
id_list = [lookup[str(i)] for i in id_list]
return sorted(id_list)
else:
return None
def parse_metadata_df(dim, meta_group, convert_neg_666):
"""
Reads in all metadata from .gctx file to pandas DataFrame
with proper GCToo specifications.
Input:
    - dim (str): Dimension of metadata; either "row" or "col"
- meta_group (HDF5 group): Group from which to read metadata values
- convert_neg_666 (bool): whether to convert "-666" values to np.nan or not
Output:
- meta_df (pandas DataFrame): data frame corresponding to metadata fields
of dimension specified.
"""
# read values from hdf5 & make a DataFrame
header_values = {}
array_index = 0
for k in meta_group.keys():
curr_dset = meta_group[k]
temp_array = np.empty(curr_dset.shape, dtype=curr_dset.dtype)
curr_dset.read_direct(temp_array)
header_values[str(k)] = temp_array
array_index = array_index + 1
# need to temporarily make dtype of all values str so that to_numeric
# works consistently with gct vs gctx parser.
meta_df = pd.DataFrame.from_dict(header_values).astype(str)
# save the ids for later use in the index; we do not want to convert them to
# numeric
ids = meta_df["id"].copy()
del meta_df["id"]
# Convert metadata to numeric if possible, after converting everything to string first
# Note: This conversion first to string is to ensure consistent behavior between
# the gctx and gct parser (which by default reads the entire text file into a string)
meta_df = meta_df.apply(lambda x: pd.to_numeric(x, errors="ignore"))
meta_df.set_index(pd.Index(ids, dtype=str), inplace=True)
# Replace -666 and -666.0 with NaN; also replace "-666" if convert_neg_666 is True
meta_df = replace_666(meta_df, convert_neg_666)
# set index and columns appropriately
set_metadata_index_and_column_names(dim, meta_df)
return meta_df
def replace_666(meta_df, convert_neg_666):
""" Replace -666, -666.0, and optionally "-666".
Args:
meta_df (pandas df):
convert_neg_666 (bool):
Returns:
out_df (pandas df): updated meta_df
"""
if convert_neg_666:
out_df = meta_df.replace([-666, "-666", -666.0], np.nan)
else:
out_df = meta_df.replace([-666, -666.0], "-666")
return out_df
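# Illustrative behavior sketch (hypothetical one-column frame):
#   replace_666(pd.DataFrame({"x": ["-666", -666, 1.5]}), convert_neg_666=True)
#     -> the first two entries become NaN, 1.5 is untouched
#   replace_666(pd.DataFrame({"x": ["-666", -666, 1.5]}), convert_neg_666=False)
#     -> the numeric -666 is normalized to the string "-666", 1.5 is untouched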
def set_metadata_index_and_column_names(dim, meta_df):
"""
Sets index and column names to GCTX convention.
Input:
- dim (str): Dimension of metadata to read. Must be either "row" or "col"
- meta_df (pandas.DataFrame): data frame corresponding to metadata fields
of dimension specified.
Output:
None
"""
if dim == "row":
meta_df.index.name = "rid"
meta_df.columns.name = "rhd"
elif dim == "col":
meta_df.index.name = "cid"
meta_df.columns.name = "chd"
def parse_data_df(data_dset, ridx, cidx, row_meta, col_meta):
"""
Parses in data_df from hdf5, subsetting if specified.
Input:
-data_dset (h5py dset): HDF5 dataset from which to read data_df
-ridx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-cidx (list): list of indexes to subset from data_df
(may be all of them if no subsetting)
-row_meta (pandas DataFrame): the parsed in row metadata
-col_meta (pandas DataFrame): the parsed in col metadata
"""
if len(ridx) == len(row_meta.index) and len(cidx) == len(col_meta.index): # no subset
data_array = np.empty(data_dset.shape, dtype=np.float32)
data_dset.read_direct(data_array)
data_array = data_array.transpose()
elif len(ridx) <= len(cidx):
first_subset = data_dset[:, ridx].astype(np.float32)
data_array = first_subset[cidx, :].transpose()
elif len(cidx) < len(ridx):
first_subset = data_dset[cidx, :].astype(np.float32)
data_array = first_subset[:, ridx].transpose()
# make DataFrame instance
data_df = pd.DataFrame(data_array, index=row_meta.index[ridx], columns=col_meta.index[cidx])
return data_df
def get_column_metadata(gctx_file_path, convert_neg_666=True):
"""
Opens .gctx file and returns only column metadata
Input:
Mandatory:
- gctx_file_path (str): full path to gctx file you want to parse.
Optional:
        - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not
Output:
- col_meta (pandas DataFrame): a DataFrame of all column metadata values.
"""
full_path = os.path.expanduser(gctx_file_path)
# open file
gctx_file = h5py.File(full_path, "r")
col_dset = gctx_file[col_meta_group_node]
col_meta = parse_metadata_df("col", col_dset, convert_neg_666)
gctx_file.close()
return col_meta
def get_row_metadata(gctx_file_path, convert_neg_666=True):
"""
Opens .gctx file and returns only row metadata
Input:
Mandatory:
- gctx_file_path (str): full path to gctx file you want to parse.
Optional:
        - convert_neg_666 (bool): whether to convert -666 values to numpy.nan or not
Output:
- row_meta (pandas DataFrame): a DataFrame of all row metadata values.
"""
full_path = os.path.expanduser(gctx_file_path)
# open file
gctx_file = h5py.File(full_path, "r")
row_dset = gctx_file[row_meta_group_node]
row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
gctx_file.close()
return row_meta
|
the-stack_0_18237 | import json
import pathlib
import platform
from pprint import pformat
import sys
import os
from unittest.mock import MagicMock
import pytest
import ray
from ray.autoscaler._private.constants import AUTOSCALER_METRIC_PORT
from ray.ray_constants import PROMETHEUS_SERVICE_DISCOVERY_FILE
from ray._private.metrics_agent import PrometheusServiceDiscoveryWriter
from ray.util.metrics import Counter, Histogram, Gauge
from ray._private.test_utils import (wait_for_condition, SignalActor,
fetch_prometheus, get_log_batch)
os.environ["RAY_event_stats"] = "1"
try:
import prometheus_client
except ImportError:
prometheus_client = None
# This list of metrics should be kept in sync with src/ray/stats/metric_defs.h
# NOTE: Commented out metrics are not available in this test.
# TODO(Clark): Find ways to trigger commented out metrics in cluster setup.
_METRICS = [
"ray_gcs_latency_sum",
"ray_object_store_available_memory",
"ray_object_store_used_memory",
"ray_object_store_num_local_objects",
"ray_object_manager_num_pull_requests",
"ray_object_directory_subscriptions",
"ray_object_directory_updates",
"ray_object_directory_lookups",
"ray_object_directory_added_locations",
"ray_object_directory_removed_locations",
"ray_heartbeat_report_ms_sum",
"ray_process_startup_time_ms_sum",
"ray_internal_num_processes_started",
"ray_internal_num_received_tasks",
"ray_internal_num_dispatched_tasks",
"ray_internal_num_spilled_tasks",
"ray_internal_num_infeasible_tasks",
# "ray_object_spilling_bandwidth_mb",
# "ray_object_restoration_bandwidth_mb",
# "ray_unintentional_worker_failures_total",
# "ray_node_failure_total",
"ray_pending_actors",
"ray_outbound_heartbeat_size_kb_sum",
"ray_operation_count",
"ray_operation_run_time_ms",
"ray_operation_queue_time_ms",
"ray_operation_active_count",
"ray_placement_group_creation_latency_ms_sum",
"ray_placement_group_scheduling_latency_ms_sum",
"ray_pending_placement_group",
"ray_registered_placement_group",
"ray_infeasible_placement_group",
"ray_new_resource_creation_latency_ms_sum",
]
# This list of metrics should be kept in sync with
# ray/python/ray/autoscaler/_private/prom_metrics.py
_AUTOSCALER_METRICS = [
"autoscaler_config_validation_exceptions",
"autoscaler_node_launch_exceptions", "autoscaler_pending_nodes",
"autoscaler_reset_exceptions", "autoscaler_running_workers",
"autoscaler_started_nodes", "autoscaler_stopped_nodes",
"autoscaler_update_loop_exceptions", "autoscaler_worker_create_node_time",
"autoscaler_worker_update_time", "autoscaler_updating_nodes",
"autoscaler_successful_updates", "autoscaler_failed_updates",
"autoscaler_failed_create_nodes", "autoscaler_recovering_nodes",
"autoscaler_successful_recoveries", "autoscaler_failed_recoveries",
"autoscaler_drain_node_exceptions", "autoscaler_update_time"
]
@pytest.fixture
def _setup_cluster_for_test(ray_start_cluster):
NUM_NODES = 2
cluster = ray_start_cluster
# Add a head node.
cluster.add_node(_system_config={"metrics_report_interval_ms": 1000})
# Add worker nodes.
[cluster.add_node() for _ in range(NUM_NODES - 1)]
cluster.wait_for_nodes()
ray.init(address=cluster.address)
worker_should_exit = SignalActor.remote()
# Generate a metric in the driver.
counter = Counter("test_driver_counter", description="desc")
counter.inc()
# Generate some metrics from actor & tasks.
@ray.remote
def f():
counter = Counter("test_counter", description="desc")
counter.inc()
counter = ray.get(ray.put(counter)) # Test serialization.
counter.inc()
counter.inc(2)
ray.get(worker_should_exit.wait.remote())
# Generate some metrics for the placement group.
pg = ray.util.placement_group(bundles=[{"CPU": 1}])
ray.get(pg.ready())
print(ray.util.placement_group_table())
ray.util.remove_placement_group(pg)
@ray.remote
class A:
async def ping(self):
histogram = Histogram(
"test_histogram", description="desc", boundaries=[0.1, 1.6])
histogram = ray.get(ray.put(histogram)) # Test serialization.
histogram.record(1.5)
ray.get(worker_should_exit.wait.remote())
a = A.remote()
obj_refs = [f.remote(), a.ping.remote()]
node_info_list = ray.nodes()
prom_addresses = []
for node_info in node_info_list:
metrics_export_port = node_info["MetricsExportPort"]
addr = node_info["NodeManagerAddress"]
prom_addresses.append(f"{addr}:{metrics_export_port}")
autoscaler_export_addr = "{}:{}".format(cluster.head_node.node_ip_address,
AUTOSCALER_METRIC_PORT)
yield prom_addresses, autoscaler_export_addr
ray.get(worker_should_exit.send.remote())
ray.get(obj_refs)
ray.shutdown()
cluster.shutdown()
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
@pytest.mark.skipif(
prometheus_client is None, reason="Prometheus not installed")
def test_metrics_export_end_to_end(_setup_cluster_for_test):
TEST_TIMEOUT_S = 20
prom_addresses, autoscaler_export_addr = _setup_cluster_for_test
def test_cases():
components_dict, metric_names, metric_samples = fetch_prometheus(
prom_addresses)
# Raylet should be on every node
assert all(
"raylet" in components for components in components_dict.values())
# GCS server should be on one node
assert any("gcs_server" in components
for components in components_dict.values())
        # Core worker should be on at least one node
assert any("core_worker" in components
for components in components_dict.values())
# Make sure our user defined metrics exist
for metric_name in [
"test_counter", "test_histogram", "test_driver_counter"
]:
assert any(metric_name in full_name for full_name in metric_names)
# Make sure metrics are recorded.
for metric in _METRICS:
assert metric in metric_names, \
f"metric {metric} not in {metric_names}"
# Make sure the numeric values are correct
test_counter_sample = [
m for m in metric_samples if "test_counter" in m.name
][0]
assert test_counter_sample.value == 4.0
test_driver_counter_sample = [
m for m in metric_samples if "test_driver_counter" in m.name
][0]
assert test_driver_counter_sample.value == 1.0
test_histogram_samples = [
m for m in metric_samples if "test_histogram" in m.name
]
buckets = {
m.labels["le"]: m.value
for m in test_histogram_samples if "_bucket" in m.name
}
# We recorded value 1.5 for the histogram. In Prometheus data model
# the histogram is cumulative. So we expect the count to appear in
        # the <=1.6 and +Inf buckets.
assert buckets == {"0.1": 0.0, "1.6": 1.0, "+Inf": 1.0}
hist_count = [m for m in test_histogram_samples
if "_count" in m.name][0].value
hist_sum = [m for m in test_histogram_samples
if "_sum" in m.name][0].value
assert hist_count == 1
assert hist_sum == 1.5
# Autoscaler metrics
_, autoscaler_metric_names, _ = fetch_prometheus(
[autoscaler_export_addr])
for metric in _AUTOSCALER_METRICS:
# Metric name should appear with some suffix (_count, _total,
# etc...) in the list of all names
assert any(name.startswith(metric) for name in
autoscaler_metric_names), \
f"{metric} not in {autoscaler_metric_names}"
def wrap_test_case_for_retry():
try:
test_cases()
return True
except AssertionError:
return False
try:
wait_for_condition(
wrap_test_case_for_retry,
timeout=TEST_TIMEOUT_S,
retry_interval_ms=1000, # Yield resource for other processes
)
except RuntimeError:
print(
f"The components are {pformat(fetch_prometheus(prom_addresses))}")
test_cases() # Should fail assert
def test_prometheus_file_based_service_discovery(ray_start_cluster):
# Make sure Prometheus service discovery file is correctly written
# when number of nodes are dynamically changed.
NUM_NODES = 5
cluster = ray_start_cluster
nodes = [cluster.add_node() for _ in range(NUM_NODES)]
cluster.wait_for_nodes()
addr = ray.init(address=cluster.address)
redis_address = addr["redis_address"]
writer = PrometheusServiceDiscoveryWriter(
redis_address, ray.ray_constants.REDIS_DEFAULT_PASSWORD, "/tmp/ray")
def get_metrics_export_address_from_node(nodes):
node_export_addrs = [
"{}:{}".format(node.node_ip_address, node.metrics_export_port)
for node in nodes
]
# monitor should be run on head node for `ray_start_cluster` fixture
autoscaler_export_addr = "{}:{}".format(
cluster.head_node.node_ip_address, AUTOSCALER_METRIC_PORT)
return node_export_addrs + [autoscaler_export_addr]
loaded_json_data = json.loads(writer.get_file_discovery_content())[0]
assert (set(get_metrics_export_address_from_node(nodes)) == set(
loaded_json_data["targets"]))
# Let's update nodes.
for _ in range(3):
nodes.append(cluster.add_node())
# Make sure service discovery file content is correctly updated.
loaded_json_data = json.loads(writer.get_file_discovery_content())[0]
assert (set(get_metrics_export_address_from_node(nodes)) == set(
loaded_json_data["targets"]))
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows.")
def test_prome_file_discovery_run_by_dashboard(shutdown_only):
ray.init(num_cpus=0)
global_node = ray.worker._global_node
temp_dir = global_node.get_temp_dir_path()
def is_service_discovery_exist():
for path in pathlib.Path(temp_dir).iterdir():
if PROMETHEUS_SERVICE_DISCOVERY_FILE in str(path):
return True
return False
wait_for_condition(is_service_discovery_exist)
@pytest.fixture
def metric_mock():
mock = MagicMock()
mock.record.return_value = "haha"
yield mock
"""
Unit test custom metrics.
"""
def test_basic_custom_metrics(metric_mock):
# Make sure each of metric works as expected.
# -- Counter --
count = Counter("count", tag_keys=("a", ))
with pytest.raises(TypeError):
count.inc("hi")
with pytest.raises(ValueError):
count.inc(0)
with pytest.raises(ValueError):
count.inc(-1)
count._metric = metric_mock
count.inc(1, {"a": "1"})
metric_mock.record.assert_called_with(1, tags={"a": "1"})
# -- Gauge --
gauge = Gauge("gauge", description="gauge")
gauge._metric = metric_mock
gauge.record(4)
metric_mock.record.assert_called_with(4, tags={})
# -- Histogram
histogram = Histogram(
"hist", description="hist", boundaries=[1.0, 3.0], tag_keys=("a", "b"))
histogram._metric = metric_mock
tags = {"a": "10", "b": "b"}
histogram.observe(8, tags=tags)
metric_mock.record.assert_called_with(8, tags=tags)
def test_custom_metrics_info(metric_mock):
# Make sure .info public method works.
histogram = Histogram(
"hist", description="hist", boundaries=[1.0, 2.0], tag_keys=("a", "b"))
assert histogram.info["name"] == "hist"
assert histogram.info["description"] == "hist"
assert histogram.info["boundaries"] == [1.0, 2.0]
assert histogram.info["tag_keys"] == ("a", "b")
assert histogram.info["default_tags"] == {}
histogram.set_default_tags({"a": "a"})
assert histogram.info["default_tags"] == {"a": "a"}
def test_custom_metrics_default_tags(metric_mock):
histogram = Histogram(
"hist", description="hist", boundaries=[1.0, 2.0],
tag_keys=("a", "b")).set_default_tags({
"b": "b"
})
histogram._metric = metric_mock
# Check specifying non-default tags.
histogram.record(10, tags={"a": "a"})
metric_mock.record.assert_called_with(10, tags={"a": "a", "b": "b"})
# Check overriding default tags.
tags = {"a": "10", "b": "c"}
histogram.record(8, tags=tags)
metric_mock.record.assert_called_with(8, tags=tags)
def test_custom_metrics_edge_cases(metric_mock):
# None or empty boundaries are not allowed.
with pytest.raises(ValueError):
Histogram("hist")
with pytest.raises(ValueError):
Histogram("hist", boundaries=[])
# Empty name is not allowed.
with pytest.raises(ValueError):
Counter("")
# The tag keys must be a tuple type.
with pytest.raises(TypeError):
Counter("name", tag_keys=("a"))
def test_metrics_override_shouldnt_warn(ray_start_regular, log_pubsub):
# https://github.com/ray-project/ray/issues/12859
@ray.remote
def override():
a = Counter("num_count", description="")
b = Counter("num_count", description="")
a.inc(1)
b.inc(1)
ray.get(override.remote())
# Check the stderr from the worker.
def matcher(log_batch):
return any("Attempt to register measure" in line
for line in log_batch["lines"])
match = get_log_batch(log_pubsub, 1, timeout=5, matcher=matcher)
assert len(match) == 0, match
def test_custom_metrics_validation(ray_start_regular_shared):
# Missing tag(s) from tag_keys.
metric = Counter("name", tag_keys=("a", "b"))
metric.set_default_tags({"a": "1"})
metric.inc(1.0, {"b": "2"})
metric.inc(1.0, {"a": "1", "b": "2"})
with pytest.raises(ValueError):
metric.inc(1.0)
with pytest.raises(ValueError):
metric.inc(1.0, {"a": "2"})
# Extra tag not in tag_keys.
metric = Counter("name", tag_keys=("a", ))
with pytest.raises(ValueError):
metric.inc(1.0, {"a": "1", "b": "2"})
# tag_keys must be tuple.
with pytest.raises(TypeError):
Counter("name", tag_keys="a")
# tag_keys must be strs.
with pytest.raises(TypeError):
Counter("name", tag_keys=(1, ))
metric = Counter("name", tag_keys=("a", ))
# Set default tag that isn't in tag_keys.
with pytest.raises(ValueError):
metric.set_default_tags({"a": "1", "c": "2"})
# Default tag value must be str.
with pytest.raises(TypeError):
metric.set_default_tags({"a": 1})
# Tag value must be str.
with pytest.raises(TypeError):
metric.inc(1.0, {"a": 1})
if __name__ == "__main__":
import sys
# Test suite is timing out. Disable on windows for now.
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_0_18238 | import torch
from torch import nn
from attack.attacker import AdditiveAttacker
import numpy as np
class FastGradientMethod(nn.Module, AdditiveAttacker):
def __init__(self, eps, norm_type:str, victim_model:nn.Module, loss_func, targeted=False):
nn.Module.__init__(self)
AdditiveAttacker.__init__(self, eps=eps)
self.norm_type = norm_type
self.victim_model = victim_model
self.targeted = targeted
self.loss_func = loss_func
self.victim_model.train()
def _get_perturbation(self, x, y=None):
tol = 1e-8
assert y is not None
x = x.requires_grad_(True)
pred = self.victim_model.forward(x)
loss = self.loss_func(pred, y)
self.victim_model.zero_grad()
loss.backward()
x_grad = x.grad.data.detach()
# Apply norm bound
if self.norm_type == 'inf':
x_grad = torch.sign(x_grad)
elif self.norm_type == '1':
ind = tuple(range(1, len(x.shape)))
x_grad = x_grad / (torch.sum(torch.abs(x_grad), axis=ind, keepdims=True) + tol)
elif self.norm_type == '2':
ind = tuple(range(1, len(x.shape)))
x_grad = x_grad / (torch.sqrt(torch.sum(x_grad*x_grad, axis=ind, keepdims=True)) + tol)
return x_grad
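# Minimal usage sketch (added for illustration; not part of the original module).
# It wraps a toy classifier and queries the gradient direction through the private
# _get_perturbation helper defined above. The model shape, eps value, and the final
# additive step are assumptions -- the real attack entry point is provided by
# AdditiveAttacker and is not shown in this file.
if __name__ == "__main__":
    eps = 0.03
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
    attacker = FastGradientMethod(eps=eps, norm_type='inf', victim_model=toy_model,
                                  loss_func=nn.CrossEntropyLoss())
    x = torch.rand(4, 1, 28, 28)
    y = torch.randint(0, 10, (4,))
    direction = attacker._get_perturbation(x, y)  # sign of the loss gradient for norm_type='inf'
    adv_x = (x.detach() + eps * direction).clamp(0.0, 1.0)  # one additive FGSM step
    print(adv_x.shape)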
|
the-stack_0_18240 | from collections import deque
from .ahdl import *
from .ahdlvisitor import AHDLVisitor, AHDLCollector
from .graph import Graph
from .stg import State
from .stg_pipeline import PipelineState
from .utils import find_only_one_in
class StateReducer(object):
def process(self, hdlmodule):
for fsm in hdlmodule.fsms.values():
WaitForwarder().process(fsm)
IfForwarder().process(fsm)
graph = StateGraphBuilder().process(fsm)
self._remove_unreached_state(fsm, graph)
self._remove_empty_state(fsm, graph)
def _remove_unreached_state(self, fsm, graph):
for stg in fsm.stgs:
if len(stg.states) == 1:
continue
for state in stg.states[:]:
if not graph.has_node(state):
stg.states.remove(state)
def _remove_empty_state(self, fsm, graph):
transition_collector = AHDLCollector(AHDL_TRANSITION)
for stg in fsm.stgs:
for state in stg.states[:]:
if (not isinstance(state, PipelineState) and
len(state.codes) == 1 and
len(graph.preds(state)) == 1 and
state.codes[0].is_a(AHDL_TRANSITION)):
pred = list(graph.preds(state))[0]
transition_collector.process_state(pred)
for _, codes in transition_collector.results.items():
for c in codes:
if c.target is state:
c.target = state.codes[0].target
stg.states.remove(state)
class StateGraph(Graph):
pass
class StateGraphBuilder(AHDLVisitor):
def process(self, fsm):
self.graph = StateGraph()
init_state = fsm.stgs[0].init_state
nexts = deque([init_state])
visited = set()
while nexts:
state = nexts.popleft()
visited.add(state)
self.next_states = []
self.visit(state)
for next in self.next_states:
self.graph.add_edge(state, next)
if next not in visited:
nexts.append(next)
return self.graph
def visit_AHDL_TRANSITION(self, ahdl):
assert isinstance(ahdl.target, State)
self.next_states.append(ahdl.target)
class WaitForwarder(AHDLVisitor):
def process(self, fsm):
for stg in fsm.stgs:
for state in stg.states:
if isinstance(state, PipelineState):
continue
wait = find_only_one_in(AHDL_META_WAIT, state.codes)
if wait and wait.transition.target is not state:
self.merge_wait_function(wait)
else:
self.visit(state)
def merge_wait_function(self, wait_func):
next_state_codes = wait_func.transition.target.codes
if next_state_codes[-1].is_a(AHDL_META_WAIT):
return
if wait_func.codes:
wait_func.codes.extend(wait_func.transition.target.codes)
else:
wait_func.codes = wait_func.transition.target.codes
# we don't remove the target codes
# because the target might be reached from an another state
#wait_func.transition.target.codes = []
wait_func.transition = None
def visit_AHDL_TRANSITION_IF(self, ahdl):
for ahdlblk in ahdl.blocks:
wait = find_only_one_in(AHDL_META_WAIT, ahdlblk.codes)
if wait:
self.merge_wait_function(wait)
class IfForwarder(AHDLVisitor):
def process(self, fsm):
self.forwarded = set()
for stg in fsm.stgs:
for state in stg.states:
if isinstance(state, PipelineState):
continue
self.visit(state)
def visit_AHDL_TRANSITION_IF(self, ahdl):
if ahdl in self.forwarded:
return
for i, ahdlblk in enumerate(ahdl.blocks):
transition = ahdlblk.codes[-1]
assert transition.is_a(AHDL_TRANSITION)
if isinstance(transition.target, PipelineState):
continue
ahdlblk.codes.pop()
ahdl.blocks[i].codes.extend(transition.target.codes[:])
self.forwarded.add(ahdl)
|
the-stack_0_18241 | # -*- coding: utf-8 -*-
"""Template parser for Faker"""
from datetime import datetime
from jinja2 import Template
from faker import Faker
from faker.providers.internet import Provider as InternetProvider
from ..providers.file_data_source_provider import FileDataSourceProvider
from ..providers.numbers_provider import NumbersProvider
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
        # IPs, networks, emails, etc.
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = TemplateParser.null_date_generator \
if date_generator is None else date_generator
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
# Only the passed objects will be accessible from the template
# the next built-in needs to be passed for next(date_generator) to work
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator,
next=next, **self.providers, **kwargs)
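# Usage sketch (added for illustration). It assumes this module is imported as part
# of its package so the relative provider imports above resolve; the template string
# is an arbitrary example and only uses names that process() exposes (`fake`,
# `datetime`).
def _example_render():
    parser = TemplateParser(
        template="{{ fake.ipv4() }} - {{ datetime.utcnow().isoformat() }}")
    return parser.process()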
|
the-stack_0_18242 | HARDWARE_ITEMS = [
{'attributes': [],
'capacity': '999',
'description': 'Unknown',
'itemCategory': {'categoryCode': 'unknown', 'id': 325},
'keyName': 'UNKNOWN',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1245172,
'itemId': 935954,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '64',
'description': '1 IPv6 Address',
'itemCategory': {'categoryCode': 'pri_ipv6_addresses',
'id': 325},
'keyName': '1_IPV6_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 17129,
'itemId': 4097,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '10',
'description': '10 Mbps Public & Private Network Uplinks',
'itemCategory': {'categoryCode': 'port_speed', 'id': 26},
'keyName': '10_MBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 272,
'itemId': 186,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 5}]},
{'attributes': [],
'capacity': '0',
'description': 'Ubuntu Linux 14.04 LTS Trusty Tahr (64 bit)',
'itemCategory': {'categoryCode': 'os', 'id': 12},
'keyName': 'OS_UBUNTU_14_04_LTS_TRUSTY_TAHR_64_BIT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 37650,
'itemId': 4702,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 9}],
'softwareDescription': {'id': 1362,
'longDescription': 'Ubuntu / 14.04-64',
'referenceCode': 'UBUNTU_14_64'}},
{'attributes': [],
'capacity': '1',
'description': '1 IP Address',
'itemCategory': {'categoryCode': 'pri_ip_addresses', 'id': 13},
'keyName': '1_IP_ADDRESS',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 21,
'itemId': 15,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [{'attributeTypeKeyName': 'RECLAIM_BYPASS',
'id': 1014}],
'description': 'Unlimited SSL VPN Users',
'itemCategory': {'categoryCode': 'vpn_management', 'id': 31},
'keyName': 'SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 420,
'itemId': 309,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'description': 'Reboot / KVM over IP',
'itemCategory': {'categoryCode': 'remote_management',
'id': 46},
'keyName': 'REBOOT_KVM_OVER_IP',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 906,
'itemId': 504,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'id': 22505,
'itemId': 4481,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 98}]},
{'attributes': [],
'capacity': '0',
'description': '0 GB Bandwidth',
'itemCategory': {'categoryCode': 'bandwidth', 'id': 10},
'keyName': 'BANDWIDTH_0_GB_2',
'prices': [{'accountRestrictions': [],
'currentPriceFlag': '',
'hourlyRecurringFee': '0',
'id': 1800,
'itemId': 439,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'quantity': '',
'setupFee': '0',
'sort': 99}]}]
getAllObjects = [{
'activePresets': [{
'description': 'Single Xeon 1270, 8GB Ram, 2x1TB SATA disks, Non-RAID',
'id': 64,
'isActive': '1',
'keyName': 'S1270_8GB_2X1TBSATA_NORAID',
'name': 'S1270 8GB 2X1TBSATA NORAID',
'packageId': 200
}],
'description': 'Bare Metal Server',
'firstOrderStepId': 1,
'id': 200,
'isActive': 1,
'items': HARDWARE_ITEMS,
'name': 'Bare Metal Server',
'regions': [{'description': 'WDC01 - Washington, DC - East Coast U.S.',
'keyname': 'WASHINGTON_DC',
'location': {'location': {'id': 37473,
'longName': 'Washington 1',
'name': 'wdc01'}},
'sortOrder': 10}],
'subDescription': 'Bare Metal Server',
'unitSize': 1,
}]
getItems = [
{
'id': 1234,
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 1122,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 2233,
'capacity': '1000',
'description': 'Public & Private Networks',
'itemCategory': {'categoryCode': 'Uplink Port Speeds'},
'prices': [{'id': 4477,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}]}],
},
{
'id': 1239,
'capacity': '2',
'description': 'RAM',
'itemCategory': {'categoryCode': 'RAM'},
'prices': [{'id': 1133,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}]}],
},
{
'id': 1240,
'capacity': '4',
'description': 'Private Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1007,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 1250,
'capacity': '4',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 1144,
'locationGroupId': None,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 112233,
'capacity': '55',
'description': 'Computing Instance',
'itemCategory': {'categoryCode': 'Computing Instance'},
'prices': [{'id': 332211,
'locationGroupId': 1,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}]}],
},
{
'id': 4439,
'capacity': '1',
'description': '1 GB iSCSI Storage',
'itemCategory': {'categoryCode': 'iscsi'},
'prices': [{'id': 2222}],
},
{
'id': 1121,
'capacity': '20',
'description': '20 GB iSCSI snapshot',
'itemCategory': {'categoryCode': 'iscsi_snapshot_space'},
'prices': [{'id': 2014}],
},
{
'id': 4440,
'capacity': '4',
'description': '4 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 4444}],
},
{
'id': 8880,
'capacity': '8',
'description': '8 Portable Public IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_pub'},
'prices': [{'id': 8888}],
},
{
'id': 44400,
'capacity': '4',
'description': '4 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 44441}],
},
{
'id': 88800,
'capacity': '8',
'description': '8 Portable Private IP Addresses',
'itemCategory': {'categoryCode': 'sov_sec_ip_addresses_priv'},
'prices': [{'id': 88881}],
},
{
'id': 10,
'capacity': '0',
'description': 'Global IPv4',
'itemCategory': {'categoryCode': 'global_ipv4'},
'prices': [{'id': 11}],
},
{
'id': 66464,
'capacity': '64',
'description': '/64 Block Portable Public IPv6 Addresses',
'itemCategory': {'categoryCode': 'static_ipv6_addresses'},
'prices': [{'id': 664641}],
},
{
'id': 610,
'capacity': '0',
'description': 'Global IPv6',
'itemCategory': {'categoryCode': 'global_ipv6'},
'prices': [{'id': 611}],
}]
getItemPrices = [
{
'currentPriceFlag': '',
'id': 2152,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 46626,
'itemPriceId': 2152, 'packageId': 0}],
'quantity': '',
'recurringFee': '.35',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22501,
'item': {'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{
'id': 252983,
'itemPriceId': 22501, 'packageId': 0
}],
'quantity': '',
'recurringFee': '0',
'setupFee': '0',
'sort': 0
},
{
'currentPriceFlag': '',
'id': 22441,
'item': {
'capacity': '1',
'description': '1 GB iSCSI SAN Storage',
'id': 1111,
'softwareDescriptionId': '',
'units': 'GB',
'upgradeItemId': 547
},
'itemId': 1111,
'laborFee': '0',
'onSaleFlag': '',
'oneTimeFee': '0',
'packageReferences': [{'id': 250326,
'itemPriceId': 22441, 'packageId': 0}],
'quantity': '',
'recurringFee': '15',
'setupFee': '0',
'sort': 0
}]
|
the-stack_0_18244 | # -*- coding: utf-8 -*-
# Copyright 2009 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
from tidylib import tidy_fragment
class TestFrags1(unittest.TestCase):
""" Test some sample fragment documents """
def test_frag_with_unclosed_tag(self):
h = "<p>hello"
expected = '''<p>
hello
</p>'''
doc, err = tidy_fragment(h)
self.assertEqual(doc, expected)
def test_frag_with_incomplete_img_tag(self):
h = "<img src='foo'>"
expected = '''<img src='foo' alt="" />'''
doc, err = tidy_fragment(h)
self.assertEqual(doc, expected)
def test_frag_with_entity(self):
h = "é"
expected = "é"
doc, err = tidy_fragment(h)
self.assertEqual(doc, expected)
expected = "é"
doc, err = tidy_fragment(h, {'numeric-entities':1})
self.assertEqual(doc, expected)
def test_frag_with_unicode(self):
h = u"unicode string ß"
expected = h
doc, err = tidy_fragment(h)
self.assertEqual(doc, expected)
def test_frag_with_unicode_subclass(self):
class MyUnicode(unicode):
pass
h = MyUnicode(u"unicode string ß")
expected = h
doc, err = tidy_fragment(h)
self.assertEqual(doc, expected)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_18248 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define normalization api
import paddle
import paddle.fluid as fluid
from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper
from ...framework import create_parameter
from ..initializer import Constant
from ...framework import ParamAttr
from ...fluid import dygraph_utils
import numbers
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import core, _non_static_mode, in_dygraph_mode, _in_legacy_dygraph
__all__ = []
def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
r"""
This op normalizes ``x`` along dimension ``axis`` using :math:`L_p` norm. This layer computes
.. math::
y = \frac{x}{ \max\left( \lvert \lvert x \rvert \rvert_p, epsilon\right) }
.. math::
\lvert \lvert x \rvert \rvert_p = \left( \sum_i {\lvert x_i \rvert^p} \right)^{1/p}
where, :math:`\sum_i{\lvert x_i \rvert^p}` is calculated along the ``axis`` dimension.
Parameters:
x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64.
p (float|int, optional): The exponent value in the norm formulation. Default: 2
axis (int, optional): The axis on which to apply normalization. If `axis < 0`, the dimension to normalization is `x.ndim + axis`. -1 is the last dimension.
epsilon (float, optional): Small float added to denominator to avoid dividing by zero. Default is 1e-12.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the output has the same shape and data type with ``x``.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn.functional as F
paddle.disable_static()
x = np.arange(6, dtype=np.float32).reshape(2,3)
x = paddle.to_tensor(x)
y = F.normalize(x)
print(y.numpy())
# [[0. 0.4472136 0.8944272 ]
# [0.42426404 0.5656854 0.7071067 ]]
y = F.normalize(x, p=1.5)
print(y.numpy())
# [[0. 0.40862012 0.81724024]
# [0.35684016 0.4757869 0.5947336 ]]
y = F.normalize(x, axis=0)
print(y.numpy())
# [[0. 0.24253564 0.37139067]
# [1. 0.97014254 0.9284767 ]]
"""
if in_dygraph_mode():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.final_state_p_norm(x, float(p), axis, epsilon, True, False)
return x / _C_ops.elementwise_max(out, eps)
if _in_legacy_dygraph():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _C_ops.p_norm(x, 'axis', axis, 'porder',
float(p), 'keepdim', True, 'epsilon', epsilon)
return x / _C_ops.elementwise_max(out, eps)
check_type(p, 'p', (float, int), 'normalize')
check_type(axis, 'axis', (int), 'normalize')
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'normalize')
if len(x.shape) == 1 and axis != 0 and axis != -1:
raise ValueError(
"Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".
format(axis))
attrs = {
'axis': axis,
'porder': float(p),
'keepdim': True,
'epsilon': epsilon,
}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
eps = out.block.create_var(dtype=out.dtype)
eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
return paddle.divide(x, paddle.maximum(out, eps), name=name)
def batch_norm(x,
running_mean,
running_var,
weight,
bias,
training=False,
momentum=0.9,
epsilon=1e-05,
data_format="NCHW",
use_global_stats=None,
name=None):
"""
Applies Batch Normalization as described in the paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift .
    nn.functional.batch_norm is used for nn.BatchNorm1D, nn.BatchNorm2D and nn.BatchNorm3D. Please use the above API for BatchNorm.
Parameters:
        x(Tensor): input value. Its data type should be float32 or float64.
running_mean(Tensor): running mean.
running_var(Tensor): running variance.
weight(Tensor): The weight tensor of batch_norm, can not be None.
bias(Tensor): The bias tensor of batch_norm can not be None.
epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
        training(bool, optional): True means training mode, in which statistics are computed from the current batch and the running mean and variance are updated. False means inference mode, in which the global mean and variance computed during training are used. Default: False.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default "NCHW".
        use_global_stats(bool|None, optional): Whether to use global mean and variance. If set to False, use the statistics of one mini-batch; if set to True, use the global statistics; if set to None, use global statistics in the test phase and the statistics of one mini-batch in the training phase. Default: None.
name(str, optional): Name for the BatchNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
x = np.random.seed(123)
x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
running_mean = np.random.random(size=1).astype('float32')
running_variance = np.random.random(size=1).astype('float32')
weight_data = np.random.random(size=1).astype('float32')
bias_data = np.random.random(size=1).astype('float32')
x = paddle.to_tensor(x)
rm = paddle.to_tensor(running_mean)
rv = paddle.to_tensor(running_variance)
w = paddle.to_tensor(weight_data)
b = paddle.to_tensor(bias_data)
batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
print(batch_norm_out)
"""
assert len(x.shape) >= 2, "input dim must be larger than 1"
    # input and output must share the memory
mean_out = running_mean
variance_out = running_var
true_data_format = ['NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC']
if data_format not in true_data_format:
raise ValueError(
"data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
"'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))
data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
if use_global_stats == None:
use_global_stats = not training
trainable_statistics = False
else:
trainable_statistics = not use_global_stats
if in_dygraph_mode():
batch_norm_out, _, _, _, _, _ = _C_ops.final_state_batch_norm(
x, weight, bias, running_mean, running_var, momentum, epsilon,
data_format, not training, use_global_stats, trainable_statistics,
False)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=None)
elif _in_legacy_dygraph():
# for dygraph need tuple
attrs = ("momentum", momentum, "epsilon", epsilon, "is_test",
not training, "data_layout", data_format, "use_mkldnn", False,
"fuse_with_relu", False, "use_global_stats", use_global_stats,
"trainable_statistics", trainable_statistics)
batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
x, weight, bias, running_mean, running_var, None, mean_out,
variance_out, *attrs)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=None)
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'BatchNorm')
# for static need dict
attrs = {
"momentum": momentum,
"epsilon": epsilon,
"is_test": not training,
"data_layout": data_format,
"use_mkldnn": False,
"fuse_with_relu": False,
"use_global_stats": use_global_stats,
"trainable_statistics": trainable_statistics,
}
inputs = {
"X": [x],
"Scale": [weight],
"Bias": [bias],
"Mean": [running_mean],
"Variance": [running_var]
}
helper = LayerHelper('batch_norm', **locals())
param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
saved_mean = helper.create_variable_for_type_inference(
dtype=param_dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=param_dtype, stop_gradient=True)
batch_norm_out = helper.create_variable_for_type_inference(x.dtype)
outputs = {
"Y": [batch_norm_out],
"MeanOut": [running_mean],
"VarianceOut": [running_var],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
if training or trainable_statistics:
# reserve_space is only used for training.
reserve_space = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
outputs["ReserveSpace"] = [reserve_space]
helper.append_op(
type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return helper.append_activation(batch_norm_out)
def layer_norm(x,
normalized_shape,
weight=None,
bias=None,
epsilon=1e-05,
name=None):
"""
see more detail in paddle.nn.LayerNorm
Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
normalized_shape(int|list|tuple): Input shape from an expected input of
size :math:`[*, normalized_shape[0], normalized_shape[1], ..., normalized_shape[-1]]`.
If it is a single integer, this module will normalize over the last dimension
which is expected to be of that specific size.
epsilon(float, optional): The small value added to the variance to prevent
division by zero. Default: 1e-05.
weight(Tensor, optional): The weight tensor of batch_norm. Default: None.
bias(Tensor, optional): The bias tensor of batch_norm. Default: None.
name(str, optional): Name for the LayerNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
Returns:
None
Examples:
.. code-block:: python
import paddle
import numpy as np
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:])
print(layer_norm_out)
"""
input_shape = list(x.shape)
input_ndim = len(input_shape)
if isinstance(normalized_shape, numbers.Integral):
normalized_shape = [normalized_shape]
elif isinstance(normalized_shape, tuple):
normalized_shape = list(normalized_shape)
elif not isinstance(normalized_shape, list):
raise ValueError(
"`normalized_shape` should be int, list of ints or tuple of ints.")
normalized_ndim = len(normalized_shape)
begin_norm_axis = input_ndim - normalized_ndim
if input_ndim < normalized_ndim or input_shape[
begin_norm_axis:] != normalized_shape:
str_normalized_shape = str(normalized_shape)
raise ValueError('Given normalized_shape is ' + str_normalized_shape +
', expected input with shape [*, ' +
str_normalized_shape[
1:] + ', but got input shape ' + str(input_shape))
if in_dynamic_mode():
pre_act, _, _ = _C_ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
'begin_norm_axis', begin_norm_axis)
return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'LayerNorm')
inputs = dict()
inputs['X'] = [x]
if weight:
inputs['Scale'] = [weight]
if bias:
inputs['Bias'] = [bias]
attrs = {"epsilon": epsilon, "begin_norm_axis": begin_norm_axis}
# create output
helper = LayerHelper('layer_norm', **locals())
dtype = x.dtype
mean_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
variance_out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=True)
layer_norm_out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": layer_norm_out,
"Mean": mean_out,
"Variance": variance_out,
},
attrs={"epsilon": epsilon,
"begin_norm_axis": begin_norm_axis})
return helper.append_activation(layer_norm_out)
def instance_norm(x,
running_mean=None,
running_var=None,
weight=None,
bias=None,
use_input_stats=True,
momentum=0.9,
eps=1e-05,
data_format="NCHW",
name=None):
"""
See more detail in nn.layer.InstanceNorm2D.
Parameters:
        x(Tensor): Input Tensor. Its data type should be float32 or float64.
running_mean(Tensor): running mean. Default None.
running_var(Tensor): running variance. Default None.
weight(Tensor, optional): The weight tensor of instance_norm. Default: None.
bias(Tensor, optional): The bias tensor of instance_norm. Default: None.
eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
use_input_stats(bool): Default True.
        data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..
Returns:
None.
Examples:
.. code-block:: python
import paddle
import numpy as np
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm_out = paddle.nn.functional.instance_norm(x)
print(instance_norm_out)
"""
if in_dynamic_mode():
out, _, _ = _C_ops.instance_norm(x, weight, bias, "epsilon", eps,
"momentum", momentum, "data_format",
data_format)
return out
check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")
attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}
if weight and bias:
inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
else:
inputs = {"X": [x]}
helper = LayerHelper('instance_norm', **locals())
saved_mean = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=True)
instance_norm_out = helper.create_variable_for_type_inference(x.dtype)
outputs = {
"Y": [instance_norm_out],
"SavedMean": [saved_mean],
"SavedVariance": [saved_variance]
}
helper.append_op(
type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs)
return instance_norm_out
def local_response_norm(x,
size,
alpha=1e-4,
beta=0.75,
k=1.,
data_format="NCHW",
name=None):
r"""
Local Response Normalization performs a type of "lateral inhibition" by normalizing over local input regions.
For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_
The formula is as follows:
.. math::
Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + size/2)}_{j = \max(0, i - size/2)}(Input(j, x, y))^2\right)^{\beta}
In the above equation:
- :math:`size` : The number of channels to sum over.
- :math:`k` : The offset (avoid being divided by 0).
- :math:`\\alpha` : The scaling parameter.
- :math:`\\beta` : The exponent parameter.
Args:
x (Tensor): The input 3-D/4-D/5-D tensor. The data type is float32.
size (int): The number of channels to sum over.
alpha (float, optional): The scaling parameter, positive. Default:1e-4
beta (float, optional): The exponent, positive. Default:0.75
k (float, optional): An offset, positive. Default: 1.0
data_format (str, optional): Specify the data format of the input, and the data format of the output
will be consistent with that of the input. An optional string from:
If x is 3-D Tensor, the string could be `"NCL"` or `"NLC"` . When it is `"NCL"`,
the data is stored in the order of: `[batch_size, input_channels, feature_length]`.
If x is 4-D Tensor, the string could be `"NCHW"`, `"NHWC"`. When it is `"NCHW"`,
the data is stored in the order of: `[batch_size, input_channels, input_height, input_width]`.
If x is 5-D Tensor, the string could be `"NCDHW"`, `"NDHWC"` . When it is `"NCDHW"`,
the data is stored in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
name (str, optional): Name for the operation (optional, default is None). For more information,
please refer to :ref:`api_guide_Name`.
Returns:
A tensor storing the transformation result with the same shape and data type as input.
Examples:
.. code-block:: python
import paddle
x = paddle.rand(shape=(3, 3, 112, 112), dtype="float32")
y = paddle.nn.functional.local_response_norm(x, size=5)
print(y.shape) # [3, 3, 112, 112]
"""
if not in_dynamic_mode():
check_variable_and_dtype(x, 'x', ['float32'], 'local_response_norm')
if data_format not in ['NCL', 'NLC', 'NCHW', 'NHWC', 'NCDHW', 'NDHWC']:
raise ValueError(
"data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \
"but got {}".format(data_format))
sizes = x.shape
dim = len(sizes)
if dim < 3:
raise ValueError(
'Expected 3D or higher dimensionality input, but got {} dimensions'.
format(dim))
for i, sz in enumerate(sizes):
if not sz > 0 and i > 0:
raise ValueError("Expected every dim's size to be larger than 0, "
"but the size of the {}-th dim is {}".format(i,
sz))
channel_last = True if data_format[-1] == "C" else False
from functools import reduce
sum_sizes = reduce(lambda x, y: x * y, sizes[1:])
div = paddle.unsqueeze(paddle.multiply(x, x), axis=1)
if not channel_last:
pad4d_shape = [0, 0, size // 2, (size - 1) // 2]
pool2d_shape = (size, 1)
reshape_shape = [
sizes[0], 1, sizes[1], sizes[2],
int(sum_sizes / (sizes[1] * sizes[2]))
]
pad5d_shape = [0, 0, 0, 0, size // 2, (size - 1) // 2]
pool3d_shape = (size, 1, 1)
else:
pad4d_shape = [size // 2, (size - 1) // 2, 0, 0]
pool2d_shape = (1, size)
reshape_shape = [
sizes[0], 1, sizes[1], int(sum_sizes / (sizes[1] * sizes[-1])),
sizes[-1]
]
pad5d_shape = [size // 2, (size - 1) // 2, 0, 0, 0, 0]
pool3d_shape = (1, 1, size)
if dim == 3:
div = paddle.nn.functional.pad(div, pad=pad4d_shape)
div = paddle.nn.functional.avg_pool2d(
div, kernel_size=pool2d_shape, stride=1)
div = paddle.squeeze(div, axis=1)
else:
div = paddle.reshape(div, shape=reshape_shape)
div = paddle.nn.functional.pad(div,
pad=pad5d_shape,
data_format='NCDHW')
div = paddle.nn.functional.avg_pool3d(
div, kernel_size=pool3d_shape, stride=1)
div = paddle.reshape(paddle.squeeze(div, axis=1), sizes)
div = paddle.scale(div, scale=alpha, bias=k)
div = paddle.pow(div, beta)
res = paddle.divide(x, div, name=name)
return res
|
the-stack_0_18250 | # crie um programa que leia uma frase qualquer e
# diga se ela é um polindromo, desconsiderando os espaços
# ex: apos a sopa
# ex: a sacada da casa
# ex: a torre da derrota
# ex: o lobo ama o bolo
# ex: anotaram a data da maratona
frase = str(input('Digite uma frase: ')).strip().upper()
palavras = frase.split()
junto = '' .join(palavras)
inverso = ''
print('Voce digitou a frase {}' .format(junto))
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
print('O inverso de {} eh {}' .format(junto, inverso))
if inverso == junto:
print('Temos um palindromo!')
else:
print('A frase digitada nao eh um palindromo!')
|
the-stack_0_18251 | import grpc
from concurrent import futures
import time
import meter_pb2_grpc as pb2_grpc
import meter_pb2 as pb2
from google.protobuf.json_format import MessageToDict
from service import get_measurement
import traceback
class UnaryService(pb2_grpc.MeasurementService):
def __init__(self, *args, **kwargs):
pass
def GetServerResponse(self, request, context):
print(f"GetServerResponse Request: {MessageToDict(request)}")
result = pb2.MeterList(start=request.start, end=request.end)
try:
meter_list = get_measurement(result.start.seconds, result.end.seconds)
# print(f"get_measurement {meter_list}")
result.list.extend(meter_list)
except Exception as ex:
print(f'GetServerResponse Exception {ex}')
traceback.print_stack()
print(f"GetServerResponse Length: {len(result.list)}")
return result
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
pb2_grpc.add_MeasurementServiceServicer_to_server(UnaryService(), server)
server.add_insecure_port('[::]:50051')
print("Server starting")
server.start()
server.wait_for_termination()
if __name__ == '__main__':
serve()
# python3 -m grpc_tools.protoc --proto_path=. ./meter.proto --python_out=. --grpc_python_out=.
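# Client-side sketch (added for illustration only). `MeasurementServiceStub` follows
# the usual gRPC codegen naming for the service registered above, and GetServerResponse
# is the RPC implemented by UnaryService. The request message class and its
# Timestamp-valued start/end fields are assumptions inferred from how the handler
# reads request.start.seconds and request.end.seconds.
def example_client(target='localhost:50051'):
    with grpc.insecure_channel(target) as channel:
        stub = pb2_grpc.MeasurementServiceStub(channel)
        request = pb2.MeterList()        # assumed request type carrying start/end
        request.start.FromSeconds(0)
        request.end.GetCurrentTime()
        return stub.GetServerResponse(request)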
|
the-stack_0_18254 | import os
import shutil
from test.behave_utils.utils import drop_database_if_exists, start_database_if_not_started,\
create_database, \
run_command, check_user_permissions, run_gpcommand
from steps.mirrors_mgmt_utils import MirrorMgmtContext
from gppylib.db import dbconn
def before_feature(context, feature):
# we should be able to run gpexpand without having a cluster initialized
tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors']
if set(context.feature.tags).intersection(tags_to_skip):
return
drop_database_if_exists(context, 'testdb')
drop_database_if_exists(context, 'bkdb')
drop_database_if_exists(context, 'fullbkdb')
drop_database_if_exists(context, 'schematestdb')
if 'analyzedb' in feature.tags:
start_database_if_not_started(context)
drop_database_if_exists(context, 'incr_analyze')
create_database(context, 'incr_analyze')
drop_database_if_exists(context, 'incr_analyze_2')
create_database(context, 'incr_analyze_2')
context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
context.dbname = 'incr_analyze'
# setting up the tables that will be used
context.execute_steps(u"""
Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
""")
if 'minirepro' in feature.tags:
start_database_if_not_started(context)
minirepro_db = 'minireprodb'
drop_database_if_exists(context, minirepro_db)
create_database(context, minirepro_db)
context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
context.dbname = minirepro_db
dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
context.conn.commit()
def after_feature(context, feature):
if 'analyzedb' in feature.tags:
context.conn.close()
if 'minirepro' in feature.tags:
context.conn.close()
def before_scenario(context, scenario):
if 'gpmovemirrors' in context.feature.tags:
context.mirror_context = MirrorMgmtContext()
tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors']
if set(context.feature.tags).intersection(tags_to_skip):
return
if 'analyzedb' not in context.feature.tags:
start_database_if_not_started(context)
drop_database_if_exists(context, 'testdb')
def after_scenario(context, scenario):
tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpinitstandby']
if set(context.feature.tags).intersection(tags_to_skip):
return
if 'gpmovemirrors' in context.feature.tags:
if 'temp_base_dir' in context:
shutil.rmtree(context.temp_base_dir)
if 'analyzedb' not in context.feature.tags:
start_database_if_not_started(context)
home_dir = os.path.expanduser('~')
if not check_user_permissions(home_dir, 'write') and hasattr(context, 'orig_write_permission')\
and context.orig_write_permission:
run_command(context, 'sudo chmod u+w %s' % home_dir)
if os.path.isdir('%s/gpAdminLogs.bk' % home_dir):
shutil.move('%s/gpAdminLogs.bk' % home_dir, '%s/gpAdminLogs' % home_dir)
if 'gpssh' in context.feature.tags:
run_command(context, 'sudo tc qdisc del dev lo root netem')
# for cleaning up after @given('"{path}" has its permissions set to "{perm}"')
if (hasattr(context, 'path_for_which_to_restore_the_permissions') and
hasattr(context, 'permissions_to_restore_path_to')):
os.chmod(context.path_for_which_to_restore_the_permissions, context.permissions_to_restore_path_to)
elif hasattr(context, 'path_for_which_to_restore_the_permissions'):
raise Exception('Missing permissions_to_restore_path_to for %s' %
context.path_for_which_to_restore_the_permissions)
elif hasattr(context, 'permissions_to_restore_path_to'):
raise Exception('Missing path_for_which_to_restore_the_permissions despite the specified permission %o' %
context.permissions_to_restore_path_to)
|
the-stack_0_18255 | import math
import numpy as np
import numpy.random as npr
import torch
import torch.utils.data as data
import torch.utils.data.sampler as torch_sampler
from torch.utils.data.dataloader import default_collate
from torch._six import int_classes as _int_classes
from core.config import cfg
from roi_data.minibatch import get_minibatch
import utils.blob as blob_utils
# from utils.iou_dataset import generate_rois
# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes
class RoiDataLoader(data.Dataset):
def __init__(self, roidb, num_classes, training=True):
self._roidb = roidb
self._num_classes = num_classes
self.training = training
self.DATA_SIZE = len(self._roidb)
def __getitem__(self, index_tuple):
index, ratio = index_tuple
single_db = [self._roidb[index]]
blobs, valid = get_minibatch(single_db)
#TODO: Check if minibatch is valid ? If not, abandon it.
# Need to change _worker_loop in torch.utils.data.dataloader.py.
# Squeeze batch dim
for key in blobs:
if key != 'roidb':
blobs[key] = blobs[key].squeeze(axis=0)
if self._roidb[index]['need_crop']:
self.crop_data(blobs, ratio)
# Check bounding box
entry = blobs['roidb'][0]
boxes = entry['boxes']
invalid = (boxes[:, 0] == boxes[:, 2]) | (boxes[:, 1] == boxes[:, 3])
valid_inds = np.nonzero(~ invalid)[0]
if len(valid_inds) < len(boxes):
for key in ['boxes', 'gt_classes', 'seg_areas', 'gt_overlaps', 'is_crowd',
'box_to_gt_ind_map', 'gt_keypoints']:
if key in entry:
entry[key] = entry[key][valid_inds]
entry['segms'] = [entry['segms'][ind] for ind in valid_inds]
# only support batch_size=1 when training iou branch
# if cfg.MODEL.IoU_ON and cfg.MODEL.IoU_ONLY:
# blobs['jittered_rois'], blobs['jittered_ious'] = generate_rois(single_db[0])
blobs['roidb'] = blob_utils.serialize(blobs['roidb']) # CHECK: maybe we can serialize in collate_fn
return blobs
def crop_data(self, blobs, ratio):
data_height, data_width = map(int, blobs['im_info'][:2])
boxes = blobs['roidb'][0]['boxes']
if ratio < 1: # width << height, crop height
size_crop = math.ceil(data_width / ratio) # size after crop
min_y = math.floor(np.min(boxes[:, 1]))
max_y = math.floor(np.max(boxes[:, 3]))
box_region = max_y - min_y + 1
if min_y == 0:
y_s = 0
else:
if (box_region - size_crop) < 0:
y_s_min = max(max_y - size_crop, 0)
y_s_max = min(min_y, data_height - size_crop)
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
else:
                    # CHECK: rethinking the mechanism for the case box_region > size_crop
# Now, the crop is biased on the lower part of box_region caused by
# // 2 for y_s_add
y_s_add = (box_region - size_crop) // 2
y_s = min_y if y_s_add == 0 else \
npr.choice(range(min_y, min_y + y_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, y_s:(y_s + size_crop), :,]
# Update im_info
blobs['im_info'][0] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 1] -= y_s
boxes[:, 3] -= y_s
np.clip(boxes[:, 1], 0, size_crop - 1, out=boxes[:, 1])
np.clip(boxes[:, 3], 0, size_crop - 1, out=boxes[:, 3])
blobs['roidb'][0]['boxes'] = boxes
else: # width >> height, crop width
size_crop = math.ceil(data_height * ratio)
min_x = math.floor(np.min(boxes[:, 0]))
max_x = math.floor(np.max(boxes[:, 2]))
box_region = max_x - min_x + 1
if min_x == 0:
x_s = 0
else:
if (box_region - size_crop) < 0:
x_s_min = max(max_x - size_crop, 0)
x_s_max = min(min_x, data_width - size_crop)
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
else:
x_s_add = (box_region - size_crop) // 2
x_s = min_x if x_s_add == 0 else \
npr.choice(range(min_x, min_x + x_s_add + 1))
# Crop the image
blobs['data'] = blobs['data'][:, :, x_s:(x_s + size_crop)]
# Update im_info
blobs['im_info'][1] = size_crop
# Shift and clamp boxes ground truth
boxes[:, 0] -= x_s
boxes[:, 2] -= x_s
np.clip(boxes[:, 0], 0, size_crop - 1, out=boxes[:, 0])
np.clip(boxes[:, 2], 0, size_crop - 1, out=boxes[:, 2])
blobs['roidb'][0]['boxes'] = boxes
def __len__(self):
return self.DATA_SIZE
def cal_minibatch_ratio(ratio_list):
"""Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU.
Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`
and 2) cfg.TRAIN.SCALES containing SINGLE scale.
Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can
pad and batch images base on that.
"""
DATA_SIZE = len(ratio_list)
ratio_list_minibatch = np.empty((DATA_SIZE,))
num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers
for i in range(num_minibatch):
left_idx = i * cfg.TRAIN.IMS_PER_BATCH
right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)
if ratio_list[right_idx] < 1:
# for ratio < 1, we preserve the leftmost in each batch.
target_ratio = ratio_list[left_idx]
elif ratio_list[left_idx] > 1:
# for ratio > 1, we preserve the rightmost in each batch.
target_ratio = ratio_list[right_idx]
else:
# for ratio cross 1, we make it to be 1.
target_ratio = 1
ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio
return ratio_list_minibatch
class MinibatchSampler(torch_sampler.Sampler):
def __init__(self, ratio_list, ratio_index, shuffle=True):
self.ratio_list = ratio_list
self.ratio_index = ratio_index
self.num_data = len(ratio_list)
self.shuffle = shuffle
if cfg.TRAIN.ASPECT_GROUPING:
# Given the ratio_list, we want to make the ratio same
# for each minibatch on each GPU.
self.ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
def __iter__(self):
if cfg.TRAIN.ASPECT_GROUPING:
# indices for aspect grouping awared permutation
n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
indices = np.arange(round_num_data)
if self.shuffle:
npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH)) # inplace shuffle
if rem != 0:
indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
ratio_index = self.ratio_index[indices]
ratio_list_minibatch = self.ratio_list_minibatch[indices]
else:
if self.shuffle:
rand_perm = npr.permutation(self.num_data)
else:
rand_perm = np.arange(self.num_data)
ratio_list = self.ratio_list[rand_perm]
ratio_index = self.ratio_index[rand_perm]
# re-calculate minibatch ratio list
ratio_list_minibatch = cal_minibatch_ratio(ratio_list)
return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))
def __len__(self):
return self.num_data
class BatchSampler(torch_sampler.BatchSampler):
r"""Wraps another sampler to yield a mini-batch of indices.
Args:
sampler (Sampler): Base sampler.
batch_size (int): Size of mini-batch.
drop_last (bool): If ``True``, the sampler will drop the last batch if
its size would be less than ``batch_size``
Example:
>>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
def __init__(self, sampler, batch_size, drop_last):
if not isinstance(sampler, torch_sampler.Sampler):
raise ValueError("sampler should be an instance of "
"torch.utils.data.Sampler, but got sampler={}"
.format(sampler))
if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
batch_size <= 0:
raise ValueError("batch_size should be a positive integeral value, "
"but got batch_size={}".format(batch_size))
if not isinstance(drop_last, bool):
raise ValueError("drop_last should be a boolean value, but got "
"drop_last={}".format(drop_last))
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx) # Difference: batch.append(int(idx))
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def collate_minibatch(list_of_blobs):
"""Stack samples seperately and return a list of minibatches
A batch contains NUM_GPUS minibatches and image size in different minibatch may be different.
Hence, we need to stack smaples from each minibatch seperately.
"""
Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched into a tensor.
# So we keep roidb in the type of "list of ndarray".
list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]
for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
mini_list = list_of_blobs[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
# Pad image data
mini_list = pad_image_data(mini_list)
minibatch = default_collate(mini_list)
minibatch['roidb'] = list_of_roidb[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
for key in minibatch:
Batch[key].append(minibatch[key])
return Batch
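# Illustrative note (not from the original source): with cfg.TRAIN.IMS_PER_BATCH = 2
# and a list of 4 blobs, every key in the returned Batch maps to a list of 2 collated
# minibatches, and Batch['roidb'] holds 2 lists of per-image roidb entries.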
def pad_image_data(list_of_blobs):
max_shape = blob_utils.get_max_shape([blobs['data'].shape[1:] for blobs in list_of_blobs])
output_list = []
for blobs in list_of_blobs:
data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)
_, h, w = blobs['data'].shape
data_padded[:, :h, :w] = blobs['data']
blobs['data'] = data_padded
output_list.append(blobs)
return output_list
|
the-stack_0_18258 | """Get EMT2013 parameters from fitting script output file."""
from ase.atoms import string2symbols
def get_parameters(file, number=2):
file = open(file)
text = file.read()
file.close()
# Find elements
s = -1
for i in range(number):
s = text.find('Optimization', s + 1)
if s < 0:
raise ValueError("No results in file (keyword Optimization missing in file)")
e = text.find('\n', s)
errfunc = float(text[s:e].split()[6])
result = {}
s = e + 1
e = text.find('Fitting values', s) - 4
for line in text[s:e].split('\n'):
words = line.strip().split()
if words[1] == 'parameters':
elements = tuple(string2symbols(words[0]))
result[elements] = []
else:
result[elements].append(float(words[1]))
return result, errfunc
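# Input format assumed by this parser (inferred from the parsing logic above; the
# exact fitting-script output is not reproduced here):
#   - a line containing 'Optimization' whose 7th whitespace-separated token is read
#     as the error-function value,
#   - followed by lines that are either '<Symbols> parameters' (e.g. 'CuAg parameters',
#     starting a new element tuple) or '<name> <value>' pairs appended as floats,
#   - up to a terminating 'Fitting values' line.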
|
the-stack_0_18259 | #!/usr/bin/env python3
# Copyright (c) 2016 Fabian Schuiki
#
# This script gathers the contents of a bunch of CSV files into one, prefixing
# the information with additional columns whose values it extracts from the
# name of the subdirectory where the corresponding rows were found.
import sys, csv, os, argparse
from collections import OrderedDict
# Parse the command line arguments.
parser = argparse.ArgumentParser(prog="potstill collect", description="Collect results from multiple parametric sweep runs.")
parser.add_argument("FILENAME", type=str, help="name of the file to collect")
parser.add_argument("-r", "--rows", action="store_true", help="treat first line as header, others as data")
parser.add_argument("-d", "--dir", metavar=("DIR", "PARAMS"), nargs=2, action="append", default=[])
args = parser.parse_args()
def dirs():
if len(args.dir) == 0:
for d in os.listdir():
yield (d,d)
else:
for d in args.dir:
yield d
# Make a list of all files that need to be merged into one.
rows = list()
keys = list()
keys_seen = set()
for (d,dp) in dirs():
f = d+"/"+args.FILENAME
if os.path.isdir(d) and os.path.exists(f):
# Split the directory name into parameter names and values.
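        # e.g. a directory encoded as "vdd=1.2,temp=25" (hypothetical values) becomes
        # OrderedDict([('vdd', '1.2'), ('temp', '25')]).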
params = OrderedDict([a.split("=") for a in dp.split(",")])
# Read the file.
new_rows = list()
with open(f) as fd:
rd = csv.reader(fd)
if args.rows:
columns = next(rd)
for row in rd:
data = params.copy()
data.update(OrderedDict(zip(columns,row)))
new_rows.append(data)
else:
data = params.copy()
data.update(OrderedDict([(x[0], float(x[1])) for x in rd]))
new_rows.append(data)
# Merge in the data.
for row in new_rows:
rows.append(row)
for k in row.keys():
if k not in keys_seen:
keys.append(k)
keys_seen.add(k)
# Output the merged data as CSV.
wr = csv.writer(sys.stdout)
wr.writerow(keys)
for r in rows:
row = [(r[k] if k in r else None) for k in keys]
wr.writerow(row)
|
the-stack_0_18260 | # -*- coding:utf-8 -*-
import os
import subprocess
import sys
source = sys.argv[1]
TYPES = ('.jpeg', '.png', '.jpg')
def ensure_dir(img_file):
folder_path = os.path.split(img_file)[0]
dir_path = os.path.join(folder_path, 'output')
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def convert_a_img(img_file):
file = os.path.split(img_file)[1]
folder_path = os.path.split(img_file)[0]
name = os.path.splitext(file)[0]
suffix = os.path.splitext(file)[1]
output_path = os.path.join(folder_path, 'output')
url_out = os.path.join(output_path, name + '_mini' + suffix)
subprocess.call(['guetzli', '--quality', '84', '--verbose', img_file, url_out])
###########################################################################
#main
def main():
if os.path.isdir(source):
for root, dirs, files in os.walk(source):
for a_file in files:
suffix = os.path.splitext(a_file)[1]
if suffix in TYPES:
ensure_dir(os.path.join(root, a_file))
convert_a_img(os.path.join(root, a_file))
pass
elif os.path.isfile(source):
ensure_dir(source)
convert_a_img(source)
else:
        print('Please check the input: no files or folders were found.')
if __name__ == '__main__':
    main()
|
the-stack_0_18262 | # -*- coding: utf-8 -*-
"""
sphinxcontrib.cf3domain
~~~~~~~~~~~~~~~~~~~~~~~~
The CFEngine 3 domain.
:copyright: Copyright 2015 by Jimmy Thrasibule
:license: BSD, see LICENSE file for details.
"""
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
from sphinx.domains.std import Program
from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, TypedField, GroupedField
cf3_sig_re = re.compile(
r'''^(\w+\s+)? # Type
(?:([\w-]+):)? # Namespace
([\w\.-]+)\s* # Identifier
(?:\((.*)\)|::)?$ # Argument list
''',
re.VERBOSE
)
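# Illustrative signatures this pattern is intended to match (examples are ours, not
# taken from the CFEngine documentation): 'slist ns:my_list',
# 'edit_line insert_lines(lines)' and 'some_class::'.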
_SP = ' '
_CL_MRK = '::'
_FN_SEP = '.'
_NS_SEP = ':'
class CF3Object(ObjectDescription):
"""
Description of a general CFEngine 3 object.
"""
doc_field_types = [
TypedField(
'parameters',
label=l_('Parameters'),
names=('parameter', 'param', 'argument', 'arg'),
typenames=('paramtype', 'type'),
typerolename='obj'
),
GroupedField(
'classes',
label=l_('Classes'),
names=('classes', )
),
GroupedField(
'defined',
label=l_('Defines'),
names=('defines', 'sets')
),
Field(
'type',
label=l_('Type'),
names=('type', )
),
Field(
'returnvalue',
has_arg=False,
label=l_('Returns'),
names=('returns', 'return')
),
Field(
'returntype',
has_arg=False,
label=l_('Return type'),
names=('rtype', )
),
]
def handle_signature(self, sig, signode):
"""
Transform a CFEngine 3 signature into RST nodes.
"""
sig_d = {}
m = cf3_sig_re.match(sig)
try:
sig_d['type'], sig_d['ns'], sig_d['id'], sig_d['args'] = m.groups()
except AttributeError:
raise ValueError
sig_d['ns'] = sig_d['ns'] or self.options.get(
'namespace', self.env.temp_data.get('cf3:namespace')
)
fullname = []
fn_app = fullname.append
fn_app(self.objtype + _FN_SEP)
if self.objtype in ('body', 'bundle'):
tnode = addnodes.desc_type('', '')
tnode += nodes.Text(self.objtype + _SP, self.objtype)
try:
sig_d['type'] = sig_d['type'].strip()
tnode += nodes.Text(sig_d['type'] + _SP, sig_d['type'])
fn_app(sig_d['type'] + _FN_SEP)
except AttributeError:
self.state_machine.reporter.warning(
'invalid %s definition: %s' % (self.objtype, sig),
line=self.lineno
)
signode += tnode
if sig_d['ns'] and sig_d['ns'] != 'default' \
and self.env.config.add_module_names:
signode += addnodes.desc_addname(
sig_d['ns'], sig_d['ns'] + _NS_SEP
)
signode += addnodes.desc_name(sig_d['id'], sig_d['id'])
fn_app(sig_d['id'])
signode['namespace'] = sig_d['ns']
signode['fullname'] = ''.join(fullname)
if self.objtype == 'class':
signode += addnodes.desc_name(_CL_MRK, _CL_MRK)
return ''.join(fullname), sig_d['ns']
if not sig_d['args'] and self.objtype == 'function':
signode += addnodes.desc_parameterlist()
return ''.join(fullname), sig_d['ns']
if sig_d['args']:
paramlist = addnodes.desc_parameterlist()
for arg in sig_d['args'].split(','):
arg = arg.strip()
paramlist += addnodes.desc_parameter(arg, arg)
signode += paramlist
return ''.join(fullname), sig_d['ns']
def get_index_text(self, modname, name):
"""
Return the text for the index entry of the object.
"""
objtype = self.objtype
idxname = name[0].split(_FN_SEP)[-1]
idxtext = []
idx_app = idxtext.append
idx_app('%(idxname)s')
if objtype == 'function':
idx_app('()')
if modname == 'default':
idx_app(' (%(objtype)s)')
else:
idx_app(' (%(objtype)s in %(modname)s)')
return _(''.join(idxtext)) % locals()
def add_target_and_index(self, name, sig, signode):
"""
Add cross-reference IDs and entries to self.indexnode, if applicable.
"""
modname = name[1] or self.options.get(
'namespace', self.env.temp_data.get('cf3:namespace')
)
fullname = (modname and modname + _NS_SEP or '') + name[0]
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['cf3']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname
+ 'other instance in '
+ self.env.doc2path(objects[fullname][0])
+ ', use :noindex: for one of them',
line=self.lineno
)
objects[fullname] = (self.env.docname, self.objtype)
idxtext = self.get_index_text(modname, name)
if idxtext:
self.indexnode['entries'].append(
('single', idxtext, fullname, '')
)
class CF3Namespace(Directive):
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'synopsis': lambda x: x,
'noindex': directives.flag,
'deprecated': directives.flag,
}
def run(self):
modname = self.arguments[0].strip()
env = self.state.document.settings.env
env.temp_data['cf3:namespace'] = modname
ret = []
if 'noindex' in self.options:
return ret
env.domaindata['cf3']['namespaces'][modname] = (
env.docname, self.options.get('synopsis', ''),
'deprecated' in self.options,
)
targetnode = nodes.target(
'', '', ids=['namespace-' + modname], ismod=True
)
self.state.document.note_explicit_target(targetnode)
ret.append(targetnode)
indextext = _('%s (namespace)') % modname
ret.append(
addnodes.index(entries=[
('single', indextext, 'namespace-' + modname, modname)
])
)
return ret
class CF3XRefRole(XRefRole):
"""
Cross-referencing role for the CFEngine 3 domain.
"""
def process_link(self, env, refnode, has_explicit_title, title, target):
if not has_explicit_title:
target = target.lstrip('~')
target = target.lstrip('-')
if title[0:1] == '~':
title = title[1:].split(_NS_SEP)[-1]
elif title[0:1] == '-':
title = title[1:].split(_SP)[-1]
elif refnode['reftype'] in ('body', 'bundle'):
title = refnode['reftype'] + _SP + title
return title, target
class CF3Domain(Domain):
"""CFEngine 3 policy domain."""
name = 'cf3'
label = 'CFEngine 3'
object_types = {
'attribute': ObjType(l_('attribute'), 'attr', 'obj'),
'body': ObjType(l_('body'), 'body', 'obj'),
'bundle': ObjType(l_('bundle'), 'bundle', 'obj'),
'class': ObjType(l_('class'), 'class', 'obj'),
'component': ObjType(l_('component'), 'comp', 'obj'),
'function': ObjType(l_('function'), 'func', 'obj'),
'namespace': ObjType(l_('namespace'), 'ns', 'obj'),
'promise': ObjType(l_('promise'), 'promise', 'obj'),
'variable': ObjType(l_('variable'), 'var', 'obj'),
}
directives = {
'attribute': CF3Object,
'body': CF3Object,
'bundle': CF3Object,
'class': CF3Object,
'component': Program,
'function': CF3Object,
'namespace': CF3Namespace,
'promise': CF3Object,
'variable': CF3Object,
}
roles = {
'attr': CF3XRefRole(),
'body': CF3XRefRole(),
'bundle': CF3XRefRole(),
'class': CF3XRefRole(),
'comp': CF3XRefRole(),
'func': CF3XRefRole(fix_parens=False),
'ns': CF3XRefRole(),
'promise': CF3XRefRole(),
'var': CF3XRefRole(),
}
initial_data = {
'objects': {},
'namespaces': {},
}
def clear_doc(self, docname):
for fullname, (fn, _) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
for ns, (fn, _, _) in list(self.data['namespaces'].items()):
if fn == docname:
del self.data['namespaces'][ns]
def find_obj(self, env, modname, sig, typ):
if not sig:
return []
modname = modname and modname + ':' or ''
s_type = sig['type'] and _FN_SEP + sig['type'] or ''
s_id = sig['id'] and _FN_SEP + sig['id'] or ''
prefix = '%(modname)s%(typ)s' % locals()
suffix = '%(s_type)s%(s_id)s' % locals()
objects = self.data['objects']
startswith = (o for o in objects if o.startswith(prefix))
return tuple(
(m, objects[m]) for m in startswith if m.endswith(suffix)
)
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
if typ in ('ns', 'obj'):
try:
                docname, synopsis, deprecated = self.data['namespaces'][target]
except KeyError:
return None
title = '%s%s' % ( synopsis, (deprecated and ' (deprecated)' or ''))
return make_refnode(
builder, fromdocname, docname, 'namespace-' + target,
contnode, title
)
sig_d = {}
m = cf3_sig_re.match(target)
try:
sig_d['type'], sig_d['ns'], sig_d['id'], sig_d['args'] = m.groups()
except AttributeError:
return None
try:
sig_d['type'] = sig_d['type'].strip()
except AttributeError:
pass
modname = sig_d['ns'] or node.get('cf3:namespace', 'default')
matches = self.find_obj(env, modname, sig_d, typ)
if not matches:
return None
if len(matches) > 1:
env.warn_node(
'more than one target found for cross-reference '
'%r: %s'%(target, ', '.join(m[0] for m in matches))
)
name, obj = matches[0]
if obj:
return make_refnode(
builder, fromdocname, obj[0], name, contnode, name
)
def get_objects(self):
for ns, info in self.data['namespaces'].items():
yield (ns, ns, 'namespace', info[0], 'namespace-' + ns, 0)
for refname, (docname, typ) in self.data['objects'].items():
yield (refname, refname, typ, docname, refname, 1)
def setup(app):
app.add_domain(CF3Domain)
|
the-stack_0_18264 | # Copyright (c) 2018 Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell,
# Kevin Swersky, Joshua B. Tenenbaum, Hugo Larochelle, Richars S. Zemel.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""
A batch iterator.
Usage:
for idx in BatchIterator(num=1000, batch_size=25):
inp_batch = inp_all[idx]
labels_batch = labels_all[idx]
train(inp_batch, labels_batch)
"""
from __future__ import division, print_function
import numpy as np
import threading
from fewshot.utils import logger
class IBatchIterator(object):
def __iter__(self):
"""Get iterable."""
return self
def next(self):
raise Exception("Not implemented")
def reset(self):
raise Exception("Not implemented")
pass
class BatchIterator(IBatchIterator):
def __init__(self,
num,
batch_size=1,
progress_bar=False,
log_epoch=10,
get_fn=None,
cycle=False,
shuffle=True,
stagnant=False,
seed=2,
num_batches=-1):
"""Construct a batch iterator.
Args:
      num: int, total number of examples to iterate over.
      batch_size: int, batch size.
      get_fn: optional callable mapping an index array to the actual batch data.
      cycle: bool, whether to cycle through the data indefinitely.
      shuffle: bool, whether to shuffle the iteration order.
"""
self._num = num
self._batch_size = batch_size
self._step = 0
self._num_steps = int(np.ceil(self._num / float(batch_size)))
if num_batches > 0:
self._num_steps = min(self._num_steps, num_batches)
self._pb = None
self._variables = None
self._get_fn = get_fn
self.get_fn = get_fn
self._cycle = cycle
self._shuffle_idx = np.arange(self._num)
self._shuffle = shuffle
self._random = np.random.RandomState(seed)
if shuffle:
self._random.shuffle(self._shuffle_idx)
self._shuffle_flag = False
self._stagnant = stagnant
self._log_epoch = log_epoch
self._log = logger.get()
self._epoch = 0
if progress_bar:
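      # NOTE: this assumes a progress-bar helper named `pb` is available; no `pb`
      # is imported in this module, so enabling progress_bar as written would
      # raise a NameError.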
self._pb = pb.get(self._num_steps)
pass
self._mutex = threading.Lock()
pass
def __iter__(self):
"""Get iterable."""
return self
def __len__(self):
"""Get iterable length."""
return self._num_steps
@property
def variables(self):
return self._variables
def set_variables(self, variables):
self._variables = variables
def get_fn(idx):
return self._get_fn(idx, variables=variables)
self.get_fn = get_fn
return self
def reset(self):
self._step = 0
def print_progress(self):
e = self._epoch
a = (self._step * self._batch_size) % self._num
b = self._num
p = a / b * 100
digit = int(np.ceil(np.log10(b)))
progress_str = "{:" + str(digit) + "d}"
progress_str = (progress_str + "/" + progress_str).format(int(a), int(b))
self._log.info(
"Epoch {:3d} Progress {} ({:5.2f}%)".format(e, progress_str, p))
pass
def next(self):
"""Iterate next element."""
self._mutex.acquire()
try:
# Shuffle data.
if self._shuffle_flag:
self._random.shuffle(self._shuffle_idx)
self._shuffle_flag = False
# Read/write of self._step stay in a thread-safe block.
if not self._cycle:
if self._step >= self._num_steps:
raise StopIteration()
# Calc start/end based on current step.
start = self._batch_size * self._step
end = self._batch_size * (self._step + 1)
# Progress bar.
if self._pb is not None:
self._pb.increment()
# Epoch record.
if self._cycle:
if int(end / self._num) > int(start / self._num):
self._epoch += 1
# Increment step.
if not self._stagnant:
self._step += 1
# Print progress
if self._log_epoch > 0 and self._step % self._log_epoch == 0:
self.print_progress()
finally:
self._mutex.release()
if not self._cycle:
end = min(self._num, end)
idx = np.arange(start, end)
idx = idx.astype("int")
if self.get_fn is not None:
return self.get_fn(idx)
else:
return idx
else:
start = start % self._num
end = end % self._num
if end > start:
idx = np.arange(start, end)
idx = idx.astype("int")
idx = self._shuffle_idx[idx]
else:
idx = np.concatenate([np.arange(start, self._num), np.arange(0, end)])
idx = idx.astype("int")
idx = self._shuffle_idx[idx]
# Shuffle every cycle.
if self._shuffle:
self._shuffle_flag = True
if self.get_fn is not None:
return self.get_fn(idx)
else:
return idx
pass
if __name__ == "__main__":
b = BatchIterator(
400,
batch_size=32,
progress_bar=False,
get_fn=lambda x: x,
cycle=False,
shuffle=False)
for ii in b:
print(ii)
b.reset()
for ii in b:
print(ii)
|
the-stack_0_18265 | # Initialize databases and counters, and go to US GPO bulk data repository site.
# Open and read a 116th Congress House Bill XML file as part of PY4E Capstone project
# Crawl through legislation and insert bill data into databases for eventual visualization.
# /Users/tim/Documents/PY4E/capstone/DataAnalysisProject/GPOBulkDataCrawl/116-HR-billcrawl-v1.py
from urllib.request import Request, urlopen
import urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
import sqlite3
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Create "HRBills", "Origin", "LegislationType", "Sponsors" (many to many
# mapping), and "Roles" tables.
# SQL: Create tables if they do not already exist.
conn = sqlite3.connect('116-HR-net.sqlite')
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS Origin')
cur.execute('DROP TABLE IF EXISTS Roles')
cur.execute('DROP TABLE IF EXISTS LegislationType')
# for primary keys update SQL command to:
# id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE
cur.execute('''CREATE TABLE IF NOT EXISTS HRBills
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
legislationtype_id INTEGER, origin_id INTEGER,
legNo TEXT, congressNo TEXT, legTitle TEXT, Sponsors TEXT,
CoSponsors TEXT, attemptParse INTEGER, parseSpon INTEGER, parseCoSpon INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Origin
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
chamber TEXT UNIQUE)''')
cur.execute('''CREATE TABLE IF NOT EXISTS LegislationType
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
legType TEXT UNIQUE)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Sponsors
(hrbill_id INTEGER, representative_id INTEGER, role_id INTEGER,
PRIMARY KEY (hrbill_id, representative_id))''')
cur.execute('''CREATE TABLE IF NOT EXISTS Roles
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, role TEXT)''')
cur.execute('INSERT INTO Origin (chamber) VALUES ("Senate")')
cur.execute('INSERT INTO Origin (chamber) VALUES ("House")')
cur.execute('INSERT INTO Origin (chamber) VALUES ("Joint")')
cur.execute('INSERT INTO Roles (role) VALUES ("sponsor")')
cur.execute('INSERT INTO Roles (role) VALUES ("co-sponsor")')
# Legislation type: HR (House Bill), S (Senate Bill), HJRES (House Joint Resolution),
# SJRES (Senate Joint Resolution), HCONRES (House Concurrent Resolution), SCONRES
# (Senate Concurrent Resolution), HRES (House Simple Resolution), SRES (Senate Simple Resolution)
cur.execute('INSERT INTO LegislationType (legType) VALUES ("HR")')
cur.execute('INSERT INTO LegislationType (legType) VALUES ("S")')
cur.execute('INSERT INTO LegislationType (legType) VALUES ("HJRES")')
cur.execute('INSERT INTO LegislationType (legType) VALUES ("SJRES")')
cur.execute('INSERT INTO LegislationType (legType) VALUES ("HCONRES")')
cur.execute('INSERT INTO LegislationType (legType) VALUES ("SCONRES")')
conn.commit()
# Initialize variables. If there are records in HRBills, then NNNN = the max id in table
errCount = 0
# recordInsert = 0
# commitflag = False
many = 0
cur.execute('SELECT max(id), legNo FROM HRBills')
try:
row = cur.fetchone()
# When there are no records in the HRBills table, fetchone returns a tuple.
# Will have to check whether a tuple is returned when there is a record in the table
if row[0] is None :
NNNN = 0
else:
NNNN = int(row[1])
except:
NNNN = 0
# Create dictionary of members, origin and legislation type indexes:
# memberindx(bioguideID) = {id}
# originindx[chamber] = {id}
# legistypeindx[legtype] = {id}
memberindx = dict()
cur.execute('SELECT id, bioguideID FROM Representatives')
for rep in cur:
id = rep[0]
bioguideID = rep[1]
memberindx[bioguideID] = id
# print("memberindx:\n", memberindx, "\n\n")
originindx = dict()
cur.execute('SELECT id, chamber from Origin')
for body in cur:
id = body[0]
chamber = body[1]
originindx[chamber] = id
# print("originindx:\n", originindx, "\n\n")
legistypeindx = dict()
cur.execute('SELECT id, legtype FROM LegislationType')
for leg in cur:
id = leg[0]
legtype = leg[1]
legistypeindx[legtype] = id
# print("legistypeindx:\n", legistypeindx, "\n\n")
# Set prefix for the XML URL that we will open:
# https://www.govinfo.gov/bulkdata/BILLSTATUS/116/hr/BILLSTATUS-116hrNNNN.xml
# The URL is broken into multiple parts, in case we want to later modify the program to
# analyze Senate and Joint bills and resolutions from other Congressional sessions.
Base_URL = "https://www.govinfo.gov/bulkdata/BILLSTATUS/116/hr/"
StatusType = "BILLSTATUS-"
CongressNo = "116"
Chamber = "hr"
while True:
if (many <= 1):
i = input("How many Congressional House Bills do you want to parse? ")
if (len(i) < 1): break
many = int(i)
else:
many = many - 1
# Open and read XML file: https://www.govinfo.gov/bulkdata/BILLSTATUS/116/hr/BILLSTATUS-116hrNNNN.xml
NNNN = NNNN + 1
Article = str(NNNN)
filename = StatusType + CongressNo + Chamber + Article + ".xml"
URLname = Base_URL + filename
print("\n\n\*** Program checkpoint 1 ***\nOpening URL: ", URLname, "\n\n")
try:
req = Request(URLname, headers={'User-Agent': 'XYZ/3.0'})
webpage = urlopen(req, timeout=20)
# webpage is Type: <class 'http.client.HTTPResponse'>
except KeyboardInterrupt:
print('')
print('Program interrupted by user...')
break
except Exception as e:
print("***** Unable to retrieve or parse XML page", URLname)
print("***** Error", e, "\n\n")
errCount = errCount + 1
if errCount > 10 : break
continue
errCount = 0
data = webpage.read()
print('\n\n*** Program checkpoint 2 ***\nRetrieved', len(data), 'characters of type ', type(data))
# data is Type bytes
# print(data.decode())
data = data.decode()
print('Decoded', len(data), 'characters of type ', type(data), '\n\n')
tree = ET.fromstring(data)
# print("\n\n*** Program checkpoint 3 ***\nXML tree type: ", type(tree))
# print("Tree length: ", len(tree))
# print(" Bill Number: ", tree[0][0].text, "\n\n")
# findall() returns a list. The variable "bill" will be a list of length 1.
# The list contains a single element object.
bill = tree.findall("bill")
# print("Bill list type: ", type(bill))
# print("Length of bill list: ", len(bill))
# print("Bill contents (list): ")
# print(bill, "\n\n")
billinfo = bill[0]
# Legislation type: bill, resolution = billStatus/bill/billType
# Originating chamber: Senate, House, Joint = billStatus/bill/originChamber
# Legislative Number = billStatus/bill/billNumber
# Congressional Session = billStatus/bill/congress
# Legislation Title = billStatus/bill/title
legitype = billinfo.find("billType").text
legitypeid = legistypeindx[legitype]
chamber = billinfo.find("originChamber").text
chamberid = originindx[chamber]
billNumber = billinfo.find("billNumber").text
session = billinfo.find("congress").text
title = billinfo.find("title").text
print("\n\n*** Program checkpoint 4 ***")
print("Bill information: ", legitypeid, chamberid, billNumber, session, title, "\n\n")
# Sponsor(s) = billStatus/bill/sponsors/item/bioguideId
# Co-sponsor(s) = billStatus/bill/cosponsors/item/bioguideId
# Note: there may be more than one sponsor and co-sponsor
sponsorstree = billinfo.find("sponsors")
sponsorstr = ET.tostring(sponsorstree)
# print("sponsors node type: ", type(sponsorstree))
# print("sponsors node content: \n", sponsorstree, "\n\n")
sponsors = sponsorstree.findall("item")
print("The number of sponsors found: ", len(sponsors), "\n")
for item in sponsors:
print("Sponsor: ", item.find("fullName").text)
print("\n\n")
cosponsorstree = billinfo.find("cosponsors")
cosponsorstr = ET.tostring(cosponsorstree)
# print("cosponsors node type: ", type(cosponsorstree))
# print("cosponsors node content: \n", cosponsorstree, "\n\n")
cosponsors = cosponsorstree.findall("item")
print("The number of co-sponsors found: ", len(cosponsors), "\n")
for item in cosponsors:
print("CoSponsor: ", item.find("fullName").text)
print("\n\n")
cur.execute('''INSERT INTO HRBills (legislationtype_id, origin_id, legNo, congressNo, legTitle, Sponsors, CoSponsors)
VALUES (?,?,?,?,?,?,?)''', (legitypeid, chamberid, billNumber, session, title, sponsorstr, cosponsorstr))
conn.commit()
conn.commit()
conn.close()
print("\n\n*** Program Complete ***\n\n")
|
the-stack_0_18268 | # coding=utf-8
import datetime
import random
import time
import wrapcache
from config import configCommon
from config.TicketEnmu import ticket
class checkUser:
def __init__(self, session):
self.session = session
def sendCheckUser(self):
"""
        Check user login status; the check interval is 2 minutes
:return:
"""
CHENK_TIME = 0.3
while 1:
            time.sleep(0.1)  # avoid excessive CPU usage
            configCommon.checkSleepTime(self.session)  # fix: while the query thread sleeps at night, the login-check thread should sleep too, otherwise kuaidou credits are consumed quickly
if wrapcache.get("user_time") is None:
check_user_url = self.session.urls["check_user_url"]
data = {"_json_att": ""}
check_user = self.session.httpClint.send(check_user_url, data)
if check_user.get("data", False):
check_user_flag = check_user["data"]["flag"]
if check_user_flag is True:
wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME)
else:
if check_user['messages']:
print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages']))
self.session.call_login()
wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME)
else:
print(ticket.LOGIN_SESSION_FAIL.format(check_user['messages']))
self.session.call_login()
wrapcache.set("user_time", datetime.datetime.now(), timeout=random.randint(60, 80) * CHENK_TIME)
|
the-stack_0_18269 | import carbonate.fnv1a
import bisect
class ConsistentHashRing(object):
def __init__(self, nodes, replica_count=100):
self.ring = []
self.nodes = set()
self.replica_count = replica_count
for node in nodes:
self.add_node(node)
def compute_ring_position(self, key):
return carbonate.fnv1a.fnv1a(key)
def add_node(self, node):
self.nodes.add(node)
(server, _, port) = node
for i in range(self.replica_count):
replica_key = "%d-%s:%u" % (i, server, port)
position = self.compute_ring_position(replica_key)
entry = (position, node)
bisect.insort(self.ring, entry)
def remove_node(self, node):
self.nodes.discard(node)
self.ring = [entry for entry in self.ring if entry[1] != node]
def get_node(self, key):
assert self.ring
node = None
node_iter = self.get_nodes(key)
node = node_iter.next()
node_iter.close()
return node
def get_nodes(self, key):
assert self.ring
nodes = set()
position = self.compute_ring_position(key)
search_entry = (position, None)
index = bisect.bisect_left(self.ring, search_entry) % len(self.ring)
last_index = (index - 1) % len(self.ring)
while len(nodes) < len(self.nodes) and index != last_index:
next_entry = self.ring[index]
(position, next_node) = next_entry
if next_node not in nodes:
nodes.add(next_node)
yield next_node
index = (index + 1) % len(self.ring)
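# Minimal usage sketch (illustrative, not part of the original module; the node
# tuples follow the (server, instance, port) shape that add_node() unpacks, and
# the names below are made up):
if __name__ == "__main__":
    ring = ConsistentHashRing([("graphite-a", None, 2004), ("graphite-b", None, 2004)])
    # get_nodes() yields candidate destinations in ring order for a metric name.
    for destination in ring.get_nodes("carbon.agents.host-1.cpuUsage"):
        print(destination)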
|
the-stack_0_18270 | import ee
from ..image_operation import ImageOperation
from sepal.ee.image import select_and_add_missing
def mask_shadows(mosaic_def, collection):
reduced = collection.select('shadowScore')\
.reduce(ee.Reducer.percentile([0, 50, 100]).combine(ee.Reducer.stdDev(), '', True))
shadowScoreMedian = select_and_add_missing(reduced, ['shadowScore_p50'])
shadowScoreMax = select_and_add_missing(reduced, ['shadowScore_p100'])
darkOutlierThreshold = shadowScoreMedian.multiply(0.7) # Outlier if it's a lot darker than the median
return collection.map(
lambda image: _MaskShadows(image, mosaic_def).apply(shadowScoreMax, darkOutlierThreshold, reduced))
class _MaskShadows(ImageOperation):
def __init__(self, image, mosaic_def):
super(_MaskShadows, self).__init__(image)
self.mosaic_def = mosaic_def
def apply(self, shadowScoreMax, darkOutlierThreshold, reduced):
def not_outlier(band, minDiff):
return reduced.expression('abs(band - median) <= max(2 * stdDev, minDiff)', {
'band': self.image.select(band),
'median': select_and_add_missing(reduced, [band + '_p50']),
'stdDev': select_and_add_missing(reduced, [band + '_stdDev']),
'minDiff': minDiff
})
self.set('shadowThreshold',
'i.shadowThreshold * (1 - {shadowTolerance})', {
'shadowTolerance': self.mosaic_def.shadow_tolerance
})
self.set('shadowThreshold',
darkOutlierThreshold.max(self.toImage('shadowThreshold')))
self.setIf('shadowThreshold',
self.toImage('shadowThreshold').gt(shadowScoreMax),
shadowScoreMax)
mask = self.toImage('i.shadowScore >= i.shadowThreshold') \
.And(not_outlier('shadowScore', 300)) \
.Or(self.toImage('cloud'))
return self.image.updateMask(mask)
|
the-stack_0_18272 | '''gff2histogram.py - compute histograms from intervals in gff or bed format
=========================================================================
:Tags: Genomics Intervals GFF Summary
Purpose
-------
This script computes distributions of interval sizes, intersegmental
distances and interval overlap from a list of intervals in :term:`gff`
or :term:`bed` format.
The output will be written into separate files. Filenames are given by
``--output-filename-pattern``.
Available methods are:
hist
Output a histogram of interval sizes and distances between intervals
in nucleotides.
stats
Output summary statistics of interval sizes and distances between
intervals
values
Output distances, sizes, and overlap values to separate files.
all
all of the above.
Usage
-----
For example, a small gff file such as this (note that intervals need
to be sorted by position)::
chr19 processed_transcript exon 60105 60162 . - .
chr19 processed_transcript exon 60521 60747 . - .
chr19 processed_transcript exon 65822 66133 . - .
chr19 processed_transcript exon 66346 66416 . - .
chr19 processed_transcript exon 66346 66509 . - .
will give when called as::
cgat gff2histogram < in.gff
the following output files:
hist
Histogram of feature sizes and distances between adjacent features
+--------+----+--------+
|residues|size|distance|
+--------+----+--------+
|58.0 |1 |na |
+--------+----+--------+
|71.0 |1 |na |
+--------+----+--------+
|164.0 |1 |na |
+--------+----+--------+
|212.0 |na |1 |
+--------+----+--------+
|227.0 |1 |na |
+--------+----+--------+
|312.0 |1 |na |
+--------+----+--------+
|358.0 |na |1 |
+--------+----+--------+
|5074.0 |na |1 |
+--------+----+--------+
stats
Summary statistics of the distribution of feature size and distance between
adjacent features.
+--------+----+--------+---------+---------+--------+---------+---------+--------+---------+
|data |nval|min |max |mean |median |stddev |sum |q1 |q3 |
+--------+----+--------+---------+---------+--------+---------+---------+--------+---------+
|size |5 |58.0000 |312.0000 |166.4000 |164.0000|95.6339 |832.0000 |71.0000 |227.0000 |
+--------+----+--------+---------+---------+--------+---------+---------+--------+---------+
|distance|3 |212.0000|5074.0000|1881.3333|358.0000|2258.3430|5644.0000|212.0000|5074.0000|
+--------+----+--------+---------+---------+--------+---------+---------+--------+---------+
overlaps
A file with features that overlap other features, here::
chr19 processed_transcript exon 66346 66416 . - . chr19 processed_transcript exon 66346 66509 . - .
Type::
python gff2histogram.py --help
for command line help.
Command line options
--------------------
'''
import sys
import cgatcore.experiment as E
import cgat.GTF as GTF
import cgat.Bed as Bed
import cgat.Histogram as Histogram
import cgat.Stats as Stats
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.ArgumentParser(description=__doc__)
parser.add_argument("--version", action='version', version="1.0")
parser.add_argument("-b", "--bin-size", dest="bin_size", type=str,
help="bin size.")
parser.add_argument("--min-value", dest="min_value", type=float,
help="minimum value for histogram.")
parser.add_argument(
"--max-value", dest="max_value", type=float,
help="maximum value for histogram.")
parser.add_argument(
"--no-empty-bins", dest="no_empty_bins", action="store_true",
help="do not display empty bins.")
parser.add_argument(
"--with-empty-bins", dest="no_empty_bins", action="store_false",
help="display empty bins.")
parser.add_argument(
"--ignore-out-of-range", dest="ignore_out_of_range",
action="store_true",
help="ignore values that are out of range (as opposed to truncating "
"them to range border.")
parser.add_argument("--missing-value", dest="missing_value", type=str,
help="entry for missing values .")
parser.add_argument("--use-dynamic-bins", dest="dynamic_bins",
action="store_true",
help="each value constitutes its own bin.")
parser.add_argument("--format", dest="format", type=str,
choices=("gff", "gtf", "bed"),
help="input file format .")
parser.add_argument("--method", dest="methods", type=str,
action="append",
choices=("all", "hist", "stats", "overlaps", "values"),
help="methods to apply .")
parser.add_argument("--output-section", dest="output_section", type=str,
choices=("all", "size", "distance"),
help="data to compute .")
parser.set_defaults(
no_empty_bins=True,
bin_size=None,
dynamic_bins=False,
ignore_out_of_range=False,
min_value=None,
max_value=None,
nonull=None,
missing_value="na",
output_filename_pattern="%s",
methods=[],
output_section="all",
format="gff",
)
(args) = E.start(parser, add_output_options=True)
if "all" in args.methods:
args.methods = ("hist", "stats", "overlaps")
if not args.output_filename_pattern:
args.output_filename_pattern = "%s"
if len(args.methods) == 0:
raise ValueError(
"please provide counting method using --method option")
if args.format in ("gff", "gtf"):
gffs = GTF.iterator(args.stdin)
elif args.format == "bed":
gffs = Bed.iterator(args.stdin)
values_between = []
values_within = []
values_overlaps = []
if "overlaps" in args.methods:
if not args.output_filename_pattern:
args.output_filename_pattern = "%s"
outfile_overlaps = E.open_output_file("overlaps")
else:
outfile_overlaps = None
last = None
ninput, noverlaps = 0, 0
for this in gffs:
ninput += 1
values_within.append(this.end - this.start)
if last and last.contig == this.contig:
if this.start < last.end:
noverlaps += 1
if outfile_overlaps:
outfile_overlaps.write("%s\t%s\n" % (str(last), str(this)))
values_overlaps.append(
min(this.end, last.end) - max(last.start, this.start))
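                # 'last' keeps the interval with the rightmost end seen so far: it is only
                # replaced when 'this' extends past it, and the 'continue' below prevents a
                # fully contained 'this' from overwriting it via the 'last = this' at the
                # bottom of the loop.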
if this.end > last.end:
last = this
continue
else:
values_between.append(this.start - last.end)
# if this.start - last.end < 10:
# print str(last)
# print str(this)
# print "=="
values_overlaps.append(0)
last = this
if "hist" in args.methods:
outfile = E.open_output_file("hist")
h_within = Histogram.Calculate(
values_within,
no_empty_bins=args.no_empty_bins,
increment=args.bin_size,
min_value=args.min_value,
max_value=args.max_value,
dynamic_bins=args.dynamic_bins,
ignore_out_of_range=args.ignore_out_of_range)
h_between = Histogram.Calculate(
values_between,
no_empty_bins=args.no_empty_bins,
increment=args.bin_size,
min_value=args.min_value,
max_value=args.max_value,
dynamic_bins=args.dynamic_bins,
ignore_out_of_range=args.ignore_out_of_range)
if "all" == args.output_section:
outfile.write("residues\tsize\tdistance\n")
combined_histogram = Histogram.Combine(
[h_within, h_between], missing_value=args.missing_value)
Histogram.Write(outfile, combined_histogram, nonull=args.nonull)
elif args.output_section == "size":
outfile.write("residues\tsize\n")
Histogram.Write(outfile, h_within, nonull=args.nonull)
elif args.output_section == "distance":
outfile.write("residues\tdistance\n")
Histogram.Write(outfile, h_between, nonull=args.nonull)
outfile.close()
if "stats" in args.methods:
outfile = E.open_output_file("stats")
outfile.write("data\t%s\n" % Stats.Summary().getHeader())
if args.output_section in ("size", "all"):
outfile.write("size\t%s\n" % str(Stats.Summary(values_within)))
if args.output_section in ("distance", "all"):
outfile.write("distance\t%s\n" %
str(Stats.Summary(values_between)))
outfile.close()
if "values" in args.methods:
outfile = E.open_output_file("distances")
outfile.write("distance\n%s\n" % "\n".join(map(str, values_between)))
outfile.close()
outfile = E.open_output_file("sizes")
outfile.write("size\n%s\n" % "\n".join(map(str, values_within)))
outfile.close()
outfile = E.open_output_file("overlaps")
outfile.write("overlap\n%s\n" % "\n".join(map(str, values_overlaps)))
outfile.close()
E.info("ninput=%i, ndistance=%i, nsize=%i, noverlap=%i" %
(ninput,
len(values_between),
len(values_within),
noverlaps))
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
the-stack_0_18274 | from elegy.initializers import TruncatedNormal
from elegy.types import Initializer
from elegy import module
import typing as tp
import jax.numpy as jnp
import haiku as hk
import numpy as np
class Linear(module.Module):
"""Linear module."""
w: np.ndarray
b: np.ndarray
def __init__(
self,
output_size: int,
with_bias: bool = True,
w_init: tp.Optional[Initializer] = None,
b_init: tp.Optional[Initializer] = None,
**kwargs
):
"""
Constructs the Linear module.
Arguments:
output_size: Output dimensionality.
with_bias: Whether to add a bias to the output.
w_init: Optional initializer for weights. By default, uses random values
from truncated normal, with stddev `1 / sqrt(fan_in)`. See
https://arxiv.org/abs/1502.03167v3.
b_init: Optional initializer for bias. By default, zero.
kwargs: Additional keyword arguments passed to Module.
"""
super().__init__(**kwargs)
self.input_size = None
self.output_size = output_size
self.with_bias = with_bias
self.w_init = w_init
self.b_init = b_init or jnp.zeros
def call(self, inputs: np.ndarray) -> np.ndarray:
""""""
if not inputs.shape:
raise ValueError("Input must not be scalar.")
input_size = self.input_size = inputs.shape[-1]
output_size = self.output_size
dtype = jnp.float32
w_init = self.w_init
if w_init is None:
stddev = 1.0 / np.sqrt(self.input_size)
w_init = TruncatedNormal(stddev=stddev)
w = self.add_parameter(
"w", [input_size, output_size], dtype, initializer=w_init
)
inputs = jnp.asarray(inputs, self.dtype)
w = jnp.asarray(w, self.dtype)
out = jnp.dot(inputs, w)
if self.with_bias:
b = self.add_parameter(
"b", [self.output_size], dtype, initializer=self.b_init
)
b = jnp.broadcast_to(b, out.shape)
b = jnp.asarray(b, self.dtype)
out = out + b
return out
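# Shape sketch (illustrative, not part of the original module): for inputs of
# shape [..., input_size], w has shape [input_size, output_size], so jnp.dot gives
# an output of shape [..., output_size]; when with_bias is True, b of shape
# [output_size] is broadcast across the leading dimensions before being added.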
|
the-stack_0_18275 | import os
import threading
import time
import Queue
import cv2
import pygame
from PIL import Image
from util.HiloCaptura import WriterCaptura
from util.config import configuration
from util.images2gif import writeGif
from util.logger import clienteLog
# solo para emular el GPIO
if os.name == 'poxis':
import RPi.GPIO as GPIO
else:
from emulators import emuladores_debug
GPIO = emuladores_debug.emulatorGPIO()
#Constantes GPIO
class CameraDuxmanV2(object):
Loggger = None
Configuration = None
ret = None
CurrentFrame = None
ColaDatos = None
hiloCaptura = None
def __init__(self):
cliente = clienteLog()
self.Logger = cliente.InicializaLogConsole()
self.Configuration = configuration(self.Logger)
self.MainProcess()
    ## Helper functions
def EncenderLed(self, Port, Timeout):
        GPIO.output(Port, True)  ## turn the LED on
        if Timeout > 0:
            time.sleep(Timeout)  ## wait for the requested time
    ## End EncenderLed
def ApagarLed(self, Port):
        GPIO.output(Port, False)  ## turn the LED off
    ## End ApagarLed
    ## Function EncenderYApagarLed
    ## Turns an LED on and waits for the given time
    ## If the time is 0, it does not wait
def EncenderYApagarLed(self, Port, Timeout):
self.EncenderLed(Port, Timeout)
self.ApagarLed(Port)
    ##End EncenderYApagarLed
def borrar(self):
self.Configuration.Pantalla.screen.fill((0, 0, 0))
def damePosicionCentro(self,tam):
Ancho = tam[0]
Alto = tam[1]
CentroHor = 400 - (Ancho / 2)
CentroVer = 240 - (Alto / 2)
return (CentroHor, CentroVer);
def Texto(self,texto, tam):
self.borrar()
font = pygame.font.Font("./resources/Fine.ttf", tam)
text_surface = font.render(texto, True, (255, 255, 255))
textSize = font.size(texto)
posicion = self.damePosicionCentro(textSize)
self.Configuration.Pantalla.screen.blit(text_surface, posicion)
pygame.display.update()
def Setup(self):
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.Configuration.BOTON_GPIO, GPIO.IN)
GPIO.setup(self.Configuration.LUZ_BOTON_GPIO, GPIO.OUT)
GPIO.setup(self.Configuration.LED_VERDE_GPIO, GPIO.OUT)
GPIO.setup(self.Configuration.LED_NARANJA_GPIO, GPIO.OUT)
GPIO.setup(self.Configuration.LED_ROJO_GPIO, GPIO.OUT)
GPIO.setup(self.Configuration.FLASH_GPIO, GPIO.OUT)
    ## End GPIO setup
    ##Filename helpers
def dameNombreFicheroCapturaBase(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "../IMAGENES/JPG/IMG_%s" % (timestr)
return filename
def dameNombreFicheroGif(self):
timestr = time.strftime("%Y%m%d-%H%M%S")
filename = "../IMAGENES/GIFS/IMG_%s.gif" % (timestr)
return filename
    ##End filename helpers
def tomarFoto(self,texto,tamtexto,led_aviso,timeout,nombrebasecaptura, numeroimagen):
        nombrefotofinal = nombrebasecaptura + '_' + format(numeroimagen, '02d') + '.jpg'
        self.Logger.info("capturamos una foto" + nombrefotofinal)
        self.Texto(texto, tamtexto)  # show the text on screen
        self.EncenderYApagarLed(led_aviso, timeout)  ## turn on the warning LED
        #self.Configuration.Camera.capture(nombrefotofinal)  # capture the image
        self.captureImagen(nombrefotofinal)
def generateGIF(self, numimages, nombrebasecaptura, nombregif,duracion):
self.Logger.info("Creamos hilo gif ")
# Esto le cuesta bastante lo ejecutamos en un hilo en 2 plano ya acabara
producer = threading.Thread(target=self.createGIF(numimages, nombrebasecaptura, nombregif,duracion), name="GIFManagerThread")
# Indicamos que es un daemon
producer.daemon = True
#iniciamos el hilo
producer.start()
def createGIF(self, numimages, nombrebasecaptura, nombregif,duracion):
        imgforgif = []
        self.Logger.info("Creamos gif " + nombregif )
        # load the images into the list
        for i in range(numimages):
            imgforgif.append(Image.open(nombrebasecaptura + format(i + 1, '02d') + '.jpg'))
        # write the gif to disk
        writeGif(nombregif, imgforgif, duration=duracion)
def captureImagen(self,nombrefotofinal):
        if( self.Configuration.TipoCamara == "RPI" ):
            self.Configuration.Camera.capture(nombrefotofinal)  # capture the image with the Raspberry Pi camera
        else:
            self.Configuration.Camera.imwrite(nombrefotofinal, self.ColaDatos.get(True, 1))
def hiloCapturaWindows(self):
self.ColaDatos = Queue.LifoQueue()
self.hiloCaptura = WriterCaptura(self.ColaDatos, self.Configuration.Camera)
self.hiloCaptura.start()
def MainProcess(self):
try:
self.Setup()
self.Texto("FOTOMATON DUX", 90)
time.sleep(3)
self.Texto("Pulsa el Boton", 90)
while True:
if (GPIO.input(self.Configuration.BOTON_GPIO)):
NombreBaseCaptura = self.dameNombreFicheroCapturaBase()
NombreGIF = self.dameNombreFicheroGif()
self.Texto("Preparando...", 100)
self.hiloCapturaWindows()
time.sleep(1)
self.Texto("Todo listo", 100)
self.Logger.info("Encendemos el flash")
# camera.start_preview()
self.ApagarLed(self.Configuration.LUZ_BOTON_GPIO) ## Apago el la luz del boton
self.Logger.info("Button Pressed")
self.EncenderLed(self.Configuration.FLASH_GPIO, 0) ## Enciendo el Flash
self.tomarFoto('3',250,self.Configuration.LED_ROJO_GPIO,3,NombreBaseCaptura,1)
self.tomarFoto('2',250, self.Configuration.LED_NARANJA_GPIO, 3, NombreBaseCaptura, 2)
self.tomarFoto('1',250, self.Configuration.LED_VERDE_GPIO, 3, NombreBaseCaptura, 3)
self.tomarFoto('SONRIE :)',100, self.Configuration.LED_VERDE_GPIO, 3, NombreBaseCaptura, 4)
self.hiloCaptura.Stop = True
self.ApagarLed(self.Configuration.FLASH_GPIO)
self.Logger.info("flash apagado")
self.Texto("Guardando ...", 100)
self.Logger.info("Guardando las fototos")
                    self.generateGIF(4, NombreBaseCaptura + '_', NombreGIF, 0.5)
# camera.stop_preview()
self.Texto("Pulsa el Boton", 90)
self.Logger.info("Volvemos a empezar")
else:
                    self.EncenderLed(self.Configuration.LUZ_BOTON_GPIO, 0)  ## turn on LUZ_BOTON_GPIO (button light)
                    self.ApagarLed(self.Configuration.LED_ROJO_GPIO)  ## turn off the red LED
                    self.ApagarLed(self.Configuration.LED_NARANJA_GPIO)  ## turn off the orange LED
                    self.ApagarLed(self.Configuration.LED_VERDE_GPIO)  ## turn off the green LED
                ##End if button
            ##End while
except Exception as ex:
self.Logger.critical( str(ex) )
finally:
self.Logger.info( "Ejecucion finalizada" )
GPIO.cleanup()
if __name__ == "__main__":
mainprogram = CameraDuxmanV2()
mainprogram.MainProcess()
mainprogram.Logger.info("--------------------<< END >>--------------------")
|
the-stack_0_18276 | """
A silly demonstration of how to use the Apple remote.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet.gl import *
import sys
class MainWindow(pyglet.window.Window):
def __init__(self):
super().__init__(visible=False)
self.set_caption('Apple Remote Example')
# Look for the Apple Remote device.
remote = pyglet.input.get_apple_remote()
if not remote:
print('Apple IR Remote not available.')
sys.exit(0)
# Open the remote in exclusive mode so that pressing the remote
# buttons does not activate Front Row, change volume, etc. while
# the remote is being used by our program.
remote.open(self, exclusive=True)
# We push this class onto the remote's event handler stack so that
# the on_button_press and on_button_release methods which we define
# below will be called for the appropriate remote events.
remote.push_handlers(self)
self.carousel = Carousel()
self.setup_opengl()
pyglet.clock.schedule_interval(self.update, 1 / 60.0)
# Event handler for Apple Remote button press events.
# The button parameter is a string specifying the button that was pressed.
def on_button_press(self, button):
print('on_button_press', button)
if button == 'up':
self.carousel.scroll_up()
elif button == 'down':
self.carousel.scroll_down()
elif button == 'left':
self.carousel.step_left()
elif button == 'right':
self.carousel.step_right()
elif button == 'left_hold':
self.carousel.rotate_left()
elif button == 'right_hold':
self.carousel.rotate_right()
elif button == 'select' or button == 'select_hold':
self.carousel.swap_left()
elif button == 'menu' or button == 'menu_hold':
self.carousel.swap_right()
# Event handler for Apple Remote button release events.
# The button parameter is a string specifying the button that was released.
def on_button_release(self, button):
print('on_button_release', button)
if button == 'left_hold':
self.carousel.stop_rotating()
elif button == 'right_hold':
self.carousel.stop_rotating()
def on_draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
gluLookAt(0, 3, -12, 0, 3, 0, 0, 1, 0)
self.carousel.draw()
def on_resize(self, width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
aspect = width / float(height)
glFrustum(-1, 1, -1.8 / aspect, 0.2 / aspect, 1, 100)
glMatrixMode(GL_MODELVIEW)
return pyglet.event.EVENT_HANDLED
def setup_opengl(self):
glClearColor(1, 1, 1, 1)
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def update(self, dt):
self.carousel.update(dt)
class Carousel:
"""A rotating collection of labeled tiles."""
def __init__(self):
self.num_tiles = 14
self.index = 0
self.float_index = 0.0
self.float_increment = 1.0 / self.num_tiles
self.angle = 0
self.index_diff = 0
self.is_rotating = False
self.speed = 4 * self.num_tiles
# Create the tiles in the carousel.
self.tiles = list()
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 205, 205), (128, 0, 128), (255, 165, 0)]
class Tile:
value = 0
color = [255, 255, 255]
for i in range(self.num_tiles):
tile = Tile()
tile.value = i % 26
tile.color = colors[i % len(colors)]
self.tiles.append(tile)
# Create glyphs for the characters displayed on the tiles.
font = pyglet.font.load('Courier', 64)
self.glyphs = font.get_glyphs('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
def scroll_up(self):
"""Increment the character displayed on the main tile."""
self.tiles[self.index].value = (self.tiles[self.index].value + 1) % 26
def scroll_down(self):
"""Decrement the character displayed on the main tile."""
self.tiles[self.index].value = (self.tiles[self.index].value - 1) % 26
def swap_left(self):
"""Swap the two left tiles."""
i = self.index
j = (self.index - 1) % self.num_tiles
self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]
def swap_right(self):
"""Swap the two right tiles."""
i = self.index
j = (self.index + 1) % self.num_tiles
self.tiles[i], self.tiles[j] = self.tiles[j], self.tiles[i]
def step_left(self):
"""Rotate the carousel one tile to the left."""
self.direction = -1
self.index_diff += 1.0
def step_right(self):
"""Rotate the carousel one tile to the right."""
self.direction = 1
self.index_diff += 1.0
def rotate_left(self):
"""Start the carousel rotating continuously to the left."""
self.is_rotating = True
self.direction = -1
def rotate_right(self):
"""Start the carousel rotating continuously to the right."""
self.is_rotating = True
self.direction = 1
def stop_rotating(self):
"""Stop continuous rotation and make sure we end up at a tile location."""
self.index_diff = round(self.float_index) - self.float_index
if self.index_diff < 0:
self.direction = -1
else:
self.direction = 1
self.index_diff = abs(self.index_diff)
def draw(self):
glPushMatrix()
glRotatef(-self.angle, 0, 1, 0)
for i in range(self.num_tiles):
self.draw_tile(i)
glPopMatrix()
def draw_tile(self, index):
angle = index * (360.0 / self.num_tiles)
glPushMatrix()
glRotatef(angle, 0, 1, 0)
glTranslatef(0, 0, -7.5)
glRotatef(-angle + self.angle, 0, 1, 0)
texture = self.glyphs[self.tiles[index].value].texture
vertex_list = pyglet.graphics.vertex_list(4, 'v2f',
('t3f', texture.tex_coords))
vertex_list.vertices[:] = [-1, -1, 1, -1, 1, 1, -1, 1]
# Draw tile background.
glColor3ub(*self.tiles[index].color)
vertex_list.draw(GL_QUADS)
# Draw tile label.
glBindTexture(texture.target, texture.id)
glEnable(texture.target)
glColor3ub(0, 0, 0)
vertex_list.vertices[:] = [.8, -.8, -.8, -.8, -.8, .8, .8, .8]
glTranslatef(0, 0, -.01)
vertex_list.draw(GL_QUADS)
glDisable(texture.target)
glPopMatrix()
def update(self, dt):
if self.is_rotating or self.index_diff:
increment = self.direction * self.speed * self.float_increment * dt
self.float_index = (self.float_index + increment) % self.num_tiles
if self.index_diff:
self.index_diff -= abs(increment)
if self.index_diff < 0:
self.index_diff = 0
self.float_index = round(self.float_index) % self.num_tiles
self.index = int(self.float_index)
self.is_rotating = False
self.angle = (self.float_index / self.num_tiles) * 360
window = MainWindow()
window.clear()
window.flip()
window.set_visible(True)
pyglet.app.run()
|
the-stack_0_18279 | import logging
from pathlib import Path
import pytest
import responses
from authlib.integrations.base_client import OAuthError
from requests import Session
from toucan_connectors.common import HttpError
from toucan_connectors.oauth2_connector.oauth2connector import NoOAuth2RefreshToken, OAuth2Connector
from toucan_connectors.salesforce.salesforce_connector import (
NoCredentialsError,
SalesforceApiError,
SalesforceConnector,
)
import_path = 'toucan_connectors.salesforce.salesforce_connector'
def test_build_authorization_url(mocker, sc):
"""
It should proxy OAuth2Connectors methods
"""
mock_oauth2_connector = mocker.Mock(spec=OAuth2Connector)
mock_oauth2_connector.client_id = 'test_client_id'
mock_oauth2_connector.client_secret = 'test_client_secret'
sc._oauth2_connector = mock_oauth2_connector
sc.build_authorization_url()
mock_oauth2_connector.build_authorization_url.assert_called()
def test_retrieve_tokens(mocker, sc):
"""
Check that the retrieve_tokens method properly returns
tokens
"""
mock_oauth2_connector = mocker.Mock(spec=OAuth2Connector)
mock_oauth2_connector.client_id = 'test_client_id'
mock_oauth2_connector.client_secret = 'test_client_secret'
sc._oauth2_connector = mock_oauth2_connector
sc.retrieve_tokens('bla')
mock_oauth2_connector.retrieve_tokens.assert_called()
@responses.activate
def test_make_request(sc, ds):
"""Check that the make_requests correctly calls the endpoint and return records"""
responses.add(
responses.GET,
'https://salesforce.is.awsome/services/data/v39.0/query',
json={
'attributes': ['a', 'b'],
'records': [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}],
},
)
resp = sc.make_request(
Session(), ds, 'https://salesforce.is.awsome', 'services/data/v39.0/query'
)
assert resp == {
'attributes': ['a', 'b'],
'records': [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}],
}
def test_get_status_no_secrets(sc, remove_secrets):
"""
Check that the connection status is false when no secret is defined
"""
status = sc.get_status().status
logging.getLogger(__name__).info(f'status {status}')
assert sc.get_status().status is False
def test_get_status_secrets_error(mocker, sc):
"""
Check that the connector status is false if the
secret manager is not able to retrieve the access token
"""
mocker.patch(f'{import_path}.OAuth2Connector.get_access_data', side_effect=Exception)
assert sc.get_status().status is False
def test_get_status_secrets_auth_error(mocker, sc):
"""
    Check that the connector status is false if
    retrieving the access token raises an OAuth error
"""
mocker.patch(f'{import_path}.OAuth2Connector.get_access_data', side_effect=OAuthError)
assert sc.get_status().status is False
def test_get_status_api_down(mocker, sc):
"""
Check that the connection status is false when the secret manager receives an httperror
"""
mocker.patch.object(SalesforceConnector, 'get_access_data', side_effect=HttpError)
assert sc.get_status().status is False
def test_get_status_ok(mocker, sc):
"""
Check that we get the connector status set to True if
the access token is correctly retrieved
"""
mocker.patch.object(
SalesforceConnector, 'get_access_data', return_value={'access_token': 'access_token'}
)
assert sc.get_status().status is True
def test_get_status_nok(mocker, sc):
mocker.patch.object(SalesforceConnector, 'get_access_data', return_value=None)
assert sc.get_status().status is False
def test_generate_rows(mocker, sc, ds, toys_results_p1, toys_results_p2):
"""Check that generate_rows handles pagination and records extraction"""
mocked_make_request = mocker.patch.object(
SalesforceConnector,
'make_request',
side_effect=[
toys_results_p1,
toys_results_p2,
],
)
res = sc.generate_rows(Session(), ds, 'https://salesforce.is.awsome', 'bla')
assert mocked_make_request.call_count == 2
assert res == [
{'Id': 'A111FA', 'Name': 'Magic Poney'},
{'Id': 'A111FB', 'Name': 'Wonderful Panther'},
{'Id': 'A111FC', 'Name': 'Lightling Lizard'},
]
def test_generate_rows_error(mocker, sc, ds, error_result):
"""Check that generate_rows handles errors while queryin the API"""
mocker.patch.object(SalesforceConnector, 'make_request', return_value=error_result)
with pytest.raises(SalesforceApiError):
sc.generate_rows(Session(), ds, 'https://salesforce.is.awsome', 'bla')
def test__retrieve_data_no_credentials(mocker, sc, ds, clean_p1):
mocker.patch.object(SalesforceConnector, 'get_access_data', return_value=None)
with pytest.raises(NoCredentialsError):
sc._retrieve_data(sc)
def test__retrieve_data(mocker, sc, ds, clean_p1):
"""Check that the connector is able to retrieve data from Salesforce API"""
secret_object = {
'access_token': 'shiny token',
'signature': 'shiny',
'scope': 'refresh_token api full',
'instance_url': 'https://salesforce.is.awsome',
'id': 'https://login.salesforce.com/id/00D09000007vcxHEAQ/00509000006bXeyAAE',
'token_type': 'Bearer',
'issued_at': '1621949493610',
'refresh_token': 'shiny token',
}
mocker.patch.object(SalesforceConnector, 'get_access_data', return_value=secret_object)
mocked_generate_rows = mocker.patch.object(
SalesforceConnector, 'generate_rows', return_value=clean_p1
)
res = sc._retrieve_data(ds)
assert mocked_generate_rows.call_count == 1
assert res.iloc[0]['Id'] == 'A111FA'
def test_get_secrets_form(mocker, sc):
"""Check that the doc for oAuth setup is correctly retrieved"""
mocker.patch(
'toucan_connectors.salesforce.salesforce_connector.os.path.dirname', return_value='fakepath'
)
mocker.patch.object(Path, 'read_text', return_value='<h1>Awesome Doc</h1>')
doc = sc.get_connector_secrets_form()
assert doc.documentation_md == '<h1>Awesome Doc</h1>'
def test__retrieve_data_no_secret(sc, ds, remove_secrets):
"""Checks that we have an exception as we secret was removed"""
with pytest.raises(NoOAuth2RefreshToken):
sc._retrieve_data(sc)
|
the-stack_0_18282 | from functools import wraps
from typing import Union, List, Tuple
from flask import request
from anubis.utils.auth import current_user
from anubis.utils.data import jsonify, _verify_data_shape
from anubis.utils.exceptions import AuthenticationError
from anubis.utils.http.https import error_response
def load_from_id(model, verify_owner=False):
"""
This flask decorator loads the id kwarg passed in by flask
and uses it to pull the sqlalchemy object corresponding to that id
>>> @app.route('/assignment/<string:id>')
>>> @require_user
>>> @load_from_id(Assignment)
>>> def view_function(assignment: Assignment):
>>> pass
    If verify_owner is true, then the sqlalchemy object's owner
relationship (assuming it has one) will be checked against the
current logged in user.
:param model:
:param verify_owner:
:return:
"""
def wrapper(func):
@wraps(func)
def decorator(id, *args, **kwargs):
# Use the id from the view functions params to query for
# the object.
r = model.query.filter_by(id=id).first()
# If the sqlalchemy object was not found, then return a 400
if r is None:
return error_response("Unable to find"), 400
# If the verify_owner option is on, then
# check the object's owner against the currently
# logged in user.
if verify_owner and current_user().id != r.owner.id:
raise AuthenticationError()
return func(r, *args, **kwargs)
return decorator
return wrapper
def json_response(func):
"""
Wrap a route so that it always converts data
response to proper json.
@app.route('/')
    @json_response
def test():
return {
'success': True
}
"""
@wraps(func)
def json_wrap(*args, **kwargs):
data = func(*args, **kwargs)
status_code = 200
if isinstance(data, tuple):
data, status_code = data
return jsonify(data, status_code)
return json_wrap
def json_endpoint(
required_fields: Union[List[str], List[Tuple], None] = None,
only_required: bool = False,
):
"""
Wrap a route so that it always converts data response to proper
json. This decorator will save a whole lot of time verifying
json body data.
The required fields should be a list of either strings or tuples.
If the required fields is a list of strings, then each of the
strings will be verified in the json body, and passed to the
view function as a kwarg.
>>> @app.route('/')
    >>> @json_endpoint(['name'])
>>> def test(name, **_):
>>> return {
>>> 'success': True
>>> }
If the required fields are a list of tuples, then the first item
should be the string name of the field, then its type. When you
specify the type in a tuple, then that fields type will also
be verified in the json body.
>>> @app.route('/')
>>> @json_endpoint([('name', str)])
>>> def test(name: str, **_):
>>> return {
>>> 'success': True
>>> }
"""
def wrapper(func):
@wraps(func)
def json_wrap(*args, **kwargs):
# Get the content type header
content_type = request.headers.get("Content-Type", default="")
# Verify that the content type header was application json.
# If the content type header is not application/json, then
# flask will not parse the body of the request.
if not content_type.startswith("application/json"):
# If the content-type was not set properly, then we
# should hand back a 406 not acceptable error code.
return error_response("Content-Type header is not application/json"), 406
# After verifying that the content type header was set,
# then we can access the request json body
json_body: dict = request.json
# Build a list of the required field string values
_required_fields: List[str] = []
# If the required fields was set, then we
# need to verify that they exist in the json
# body, along with type checks if they were
# specified.
if required_fields is not None:
# Check required fields
for index, field in enumerate(required_fields):
# If field was a tuple, extract field name and required type.
required_type = None
if isinstance(field, tuple):
                        # If the tuple has more than two items, then
                        # we don't know how to handle it.
if len(field) != 2:
pass
# Pull the field apart into the field and required type
field, required_type = field
# At this point, the tuple will have been parsed if it had one,
# so the field will always be a string. Add it to the running
# (fresh) list of required field string objects.
_required_fields.append(field)
# Make sure that the field is in the json body.
# If this condition is not met, then we will return
# a 406 not acceptable.
if field not in json_body:
# field missing, return error
# Not Acceptable
return error_response(f"Malformed requests. Missing field {field}."), 406
# If a type was specified, verify it
if required_type is not None:
# Do a type check on the json body field
if not isinstance(json_body[field], required_type):
# Not Acceptable
return error_response("Malformed requests. Invalid field type."), 406
# Give the positional args first,
# then the json data (in the order of
# the required fields), and lastly
# the kwargs that were passed in.
if required_fields is not None:
# We can optionally specify only_required to
# skip this step. Here we are adding the key
# values from the posted json to the kwargs
# of the function. This is potentially destructive
# as it will overwrite any keys already in the
# kwargs with the values in the json.
if not only_required:
for key, value in json_body.items():
if key not in _required_fields:
kwargs[key] = value
# Call the function while trying to maintain a
# logical order to the arguments
return func(
*args,
**{field: json_body[field] for field in _required_fields},
**kwargs,
)
# If there was no required fields specified, then we can just call the
# view function with the first argument being the json body.
return func(json_body, *args, **kwargs)
return json_wrap
return wrapper
def verify_shape(*shapes):
"""
This is the decorator form of the data shape verification function. It will validate the
arguments of a function before calling it. You can just sequentially provide the expected shapes
    of the arguments. It will return an error_response if there was a problem validating something.
:param shapes: sequence of argument shapes
:return: error_response on error
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# We should reject if we're not able to use all our shapes
if len(args) < len(shapes):
return error_response("Missing fields"), 406
# Verify our argument shapes
for data, shape in zip(args, shapes):
r, e = _verify_data_shape(data, shape)
if not r:
return error_response("Shape invalid {}".format(e)), 406
# Shapes pass, run function
return func(*args, **kwargs)
return wrapper
return decorator
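# Hypothetical usage sketch (not part of the original module). The shape spec
# format is whatever _verify_data_shape in anubis.utils.data accepts, which is
# not shown in this file, so EXPECTED_SHAPE below is only a placeholder name:
#
#   @verify_shape(EXPECTED_SHAPE)
#   def handle_submission(payload):
#       ...  # payload has already been validated against EXPECTED_SHAPE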
|
the-stack_0_18284 | # -*- coding : utf-8 -*-
import urllib.request
import urllib.parse
import requests
import json
import time
import multiprocessing
import os
import pymysql
config = {
'db': 'AItest',
'user': 'user',
'passwd': '123456',
'cursorclass': pymysql.cursors.DictCursor,
'host': '10.33.2.231',
'use_unicode': True
}
base_url = "http://edu.hivoice.cn:8085/eval/mp3"
text = 'It is summer. It is midnight. We are headed south. As they travel south, the men and women in this ship will be bitterly cold. Some will burn their faces, wind will sear them, but they will feel fortunate to have become part of a great adventure. For thousands of years, as human beings spread across the planet, no one came here. Antarctica was as remote as the moon. Ancient Greeks reasoned that the world was round, and that there must be a great southern continent. They imagined a land of strange beasts. It was the greatest mystery on Earth.'
# ------- send request with upload file by requests ---------
def requests_post(arg, factor):
conn = pymysql.connect(**config)
try:
with conn.cursor() as cursor:
try:
cursor.execute('insert into yun (studentid, score_human, score_coefficient, begin) VALUES (%s ,%s, %s, %s)',
(arg['studentid'], arg['score_human'], factor, int(time.time())))
conn.commit()
conn.close()
url = base_url
headers = {'appkey': "u7capvdv33lozvoarpz2uagbptwgczgb7oy76ga3", 'score-coefficient': str(factor)}
data_form = {
'text': text,
'mode': 'D',
}
path = '''./A-1000/''' + arg['file']
t0 = time.time()
with open(path,'rb') as f:
files = {'voice':f}
r2 = requests.post(url, headers=headers, data=data_form, files=files)
t1 = time.time()
print(t1-t0)
result= r2.json()['lines'][0]
m_score = result['score']
fluency = result['fluency']
pronuciation = result['pronunciation']
intergrity = result['integrity']
conn = pymysql.connect(**config)
with conn.cursor() as cursor:
cursor.execute("update `yun` set `score_machine`=%s,`fluency`=%s ,`pronuciation`=%s, `intergrity`=%s, end=%s, file=%s where `studentid`=%s and score_coefficient=%s",(m_score, fluency, pronuciation, intergrity,int(time.time()), arg['file'], arg['studentid'], factor))
conn.commit()
conn.close()
            except Exception as exc:
                print(exc)
    except Exception as exc:
        print(exc)
# [0.6,0.7,0.8,0.9,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9]
for factor in [1.8, 1.9]:
with open('./index.txt') as f:
contents = f.readlines()[1:]
content = [content.split('\t') for content in contents ]
for line in content:
arg = {'studentid': line[2], 'score_human': line[4], 'file': line[3] }
requests_post(arg, factor)
if __name__ == "__main1__":
localtime0 = time.asctime(time.localtime(time.time()))
print(localtime0)
t0 = time.time()
for i in range(100):
requests_post(1)
t1 = time.time()
print('time sp', (t1-t0,))
localtime1 = time.asctime(time.localtime(time.time()))
print(localtime1)
|
the-stack_0_18285 | from flask_mail import Message
from flask import render_template
# from . import mail
def mail_message(subject,template,to,**kwargs):
    sender_email = "[email protected]"  # placeholder sender address
email = Message(subject, sender=sender_email, recipients=[to])
email.body= render_template(template + ".txt",**kwargs)
email.html = render_template(template + ".html",**kwargs)
mail.send(email) |
the-stack_0_18288 | # -*- coding: utf-8 -*-
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
import plotly.graph_objects as go
import urllib.request
import numpy as np
import plotly.express as px
# # Read data
filepat_data = 'H:/cloud/cloud_data/Projects/DISCHARGEMaster/src/scripts/medication/sanStat.xlsx'
filepath_html = 'H:/cloud/cloud_data/Projects/MixedEffectModels/src/scripts/medication/html/medication_plot.html'
data = pd.read_excel(filepat_data, sheet_name='for graph by R')
# Create labels and links
label_org = list(np.unique((data['sanStat ( left column in the graph)']))) + list(np.unique((data['statins2 ( right column in the graph) '])))
label = [label_org[1],label_org[0]] + label_org[2:]
source = [0,0,1,1,2,2,3,3,4,4,5,5]
target = [6,7,6,7,6,7,6,7,6,7,6,7]
# Update values
value=[]
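# source[i]/target[i] index into `label`; each link's width is the number of
# rows whose left column equals label[source[i]] and whose right column equals
# label[target[i]].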
for s,t in zip(source, target):
dfs = data[(data['sanStat ( left column in the graph)']==label[s]) & (data['statins2 ( right column in the graph) ']==label[t])]
value.append(len(dfs))
# Set color for lines and nodes
color_links = ['rgba(0,255,0,0.5)' for i in range(4)] + ['rgba(255,255,0,0.5)' for i in range(4)] + ['rgba(255,0,0,0.5)' for i in range(4)]
color_node = ['rgba(0,255,0,0.5)' for i in range(2)] + ['rgba(255,255,0,0.5)' for i in range(2)] + ['rgba(255,0,0,0.5)' for i in range(2)] + ['rgba(255,0,255,0.5)', 'rgba(128,0,128,0.5)']
# Create figure
fig = go.Figure(data=[go.Sankey(
node = dict(
pad = 15,
thickness = 20,
line = dict(color = "black", width = 0.5),
label = label,
color = color_node,
),
link = dict(
source = source,
target = target,
value = value,
color = color_links,
))])
fig.update_layout(title_text="Statine statistics", font_size=10)
fig.show()
# Export html plot
fig.write_html(filepath_html)
|
the-stack_0_18289 | import unittest
from features import youtube_search
from unittest.mock import MagicMock
from unittest.mock import patch
from unittest.mock import call
from tests import MockBee
mock_bee = MockBee()
youtube_search_obj = youtube_search.Feature(mock_bee.bumblebee_api)
class TestYoutubeSearch(unittest.TestCase):
def test_search_function_called(self):
input = "show me a video on youtube videos"
youtube_search_obj.search = MagicMock(return_value="youtube videos")
query = youtube_search_obj.action(input)
youtube_search_obj.search.assert_called_once_with(query)
        self.assertEqual(query, "youtube videos")
def test_search_with_arguments_list(self):
arguments_list = ["python", "java", "c++", "kotlin"]
youtube_search_obj.search = MagicMock()
youtube_search_obj.action("", arguments_list)
youtube_search_obj.search.assert_has_calls([
call("python"), call("java"), call("c++"), call("kotlin")],
any_order=False)
def test_browser_open_function_called(self):
input = "show me a video on youtube videos"
with patch('webbrowser.open') as mock_wbopen:
query = youtube_search_obj.action(input)
mock_wbopen.assert_called_once_with(
"https://www.youtube.com/results?search_query='{}'".format(
query))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_18292 | from __future__ import annotations
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
from .config import Config
from .library import set_config, process_cached_logs
from .__header__ import __header__
# Load User Defined Config
DEFAULT_CONFIG_PATH = f'~/.config/{__header__.lower()}'
CONFIG_PATH = os.environ.get(f'{__header__}_CONFIG_PATH', DEFAULT_CONFIG_PATH)
CONFIG = Config(CONFIG_PATH)
# Logging Configuration
logger = logging.getLogger(__header__)
set_config(CONFIG, 'logging.path')
set_config(
CONFIG,
'logging.format',
'%(asctime)s - %(module)s:%(lineno)s - %(levelname)s - %(message)s',
)
set_config(CONFIG, 'logging.level', 'INFO')
loghandler_sys = logging.StreamHandler(sys.stdout)
# Checking if log path is set
if CONFIG.logging_path:
CONFIG.logging_path += (
f'{__header__}.log'
if CONFIG.logging_path[-1] == '/'
else f'/{__header__}.log'
)
# Set default log file options
set_config(CONFIG, 'logging.backup_count', 3, int)
set_config(CONFIG, 'logging.rotate_bytes', 512000, int)
# Configure file handler
loghandler_file = RotatingFileHandler(
os.path.expanduser(CONFIG.logging_path),
'a',
CONFIG.logging_rotate_bytes,
CONFIG.logging_backup_count,
)
# Add to file formatter
loghandler_file.setFormatter(logging.Formatter(CONFIG.logging_format))
logger.addHandler(loghandler_file)
# Configure and add to stdout formatter
loghandler_sys.setFormatter(logging.Formatter(CONFIG.logging_format))
logger.addHandler(loghandler_sys)
logger.setLevel(CONFIG.logging_level)
# Load module environment variables
set_config(CONFIG, 'sublime.project_file')
set_config(CONFIG, 'sublime.virtualenv', os.environ.get('VIRTUAL_ENV'))
# Print logged messages
process_cached_logs(CONFIG, logger)
|
the-stack_0_18293 | # ------------------------------------------------------------------------------
# CodeHawk C Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import argparse
import os
from typing import Any, Dict
import chc.util.fileutil as UF
violationcategories = ["V", "S", "D", "U", "O"]
safecontrolcategories = ["S", "D", "X", "U", "O"]
vhandled = ["V"]
shandled = ["S", "X"]
violations = "vs"
safecontrols = "sc"
def get_variant_description(variants, testcase):
if testcase in variants:
return variants[testcase]
else:
return "?"
def parse():
parser = argparse.ArgumentParser()
parser.add_argument(
"variant",
help=(
"sequence number of variant, e.g. 01, or 09, or 61, etc."
+ " (type ? to see a list of available variants)"
),
)
parser.add_argument("--cwe", help="only report on the given cwe")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse()
cwerequested = "all"
if args.cwe is not None:
cwerequested = args.cwe
try:
testcases = UF.get_flattened_juliet_testcases()
variants = UF.get_juliet_variant_descriptions()
except UF.CHError as e:
print(str(e.wrap()))
exit(1)
if args.variant not in variants:
print("=" * 80)
print("Juliet Test Suite control flow / dataflow variants")
print("-" * 80)
for v in sorted(variants):
print(v.ljust(5) + variants[v])
print("-" * 80)
exit(0)
stotals: Dict[Any, Any] = {}
stotals[violations] = {}
stotals[safecontrols] = {}
for c in violationcategories:
stotals[violations][c] = 0
for c in safecontrolcategories:
stotals[safecontrols][c] = 0
vppototals = 0
sppototals = 0
vppohandled = 0
sppohandled = 0
tnamelength = 0
for cwe in testcases:
maxlen = max(len(t) for t in testcases[cwe]) + 3
if maxlen > tnamelength:
tnamelength = maxlen
print(
"\n\nSummary for Juliet Test variant "
+ args.variant
+ ":\n "
+ get_variant_description(variants, args.variant)
)
print("\n")
print(
"test".ljust(tnamelength + 10) + "violations safe-controls"
)
print(
" ".ljust(tnamelength + 4)
+ "V S D U O S D X U O"
)
print("-" * (tnamelength + 64))
for cwe in sorted(testcases):
if not (cwe == cwerequested or cwerequested == "all"):
continue
print("\n" + cwe)
ctotals: Dict[Any, Any] = {}
ctotals[violations] = {}
ctotals[safecontrols] = {}
for c in violationcategories:
ctotals[violations][c] = 0
for c in safecontrolcategories:
ctotals[safecontrols][c] = 0
for cc in testcases[cwe]:
testtotals = UF.read_juliet_test_summary(cwe, cc)
if not (testtotals is None):
if args.variant not in testtotals:
print(cc.ljust(tnamelength))
continue
totals = testtotals[args.variant]
print(
cc.ljust(tnamelength)
+ "".join(
[
str(totals[violations][c]).rjust(5)
for c in violationcategories
]
)
+ " | "
+ "".join(
[
str(totals[safecontrols][c]).rjust(5)
for c in safecontrolcategories
]
)
)
for c in violationcategories:
ctotals[violations][c] += totals[violations][c]
stotals[violations][c] += totals[violations][c]
vppototals += totals[violations][c]
if c in vhandled:
vppohandled += totals[violations][c]
for c in safecontrolcategories:
ctotals[safecontrols][c] += totals[safecontrols][c]
stotals[safecontrols][c] += totals[safecontrols][c]
sppototals += totals[safecontrols][c]
if c in shandled:
sppohandled += totals[safecontrols][c]
else:
print(
cc.ljust(tnamelength)
+ ("-" * int(44 - (tnamelength / 2)))
+ " not found "
+ ("-" * int(44 - (tnamelength / 2)))
)
print("-" * (tnamelength + 64))
print(
"total".ljust(tnamelength)
+ "".join(
[str(ctotals[violations][c]).rjust(5) for c in violationcategories]
)
+ " | "
+ "".join(
[str(ctotals[safecontrols][c]).rjust(5) for c in safecontrolcategories]
)
)
print("\n\n")
print("=" * (tnamelength + 64))
print(
"grand total".ljust(tnamelength)
+ "".join([str(stotals[violations][c]).rjust(5) for c in violationcategories])
+ " | "
+ "".join(
[str(stotals[safecontrols][c]).rjust(5) for c in safecontrolcategories]
)
)
ppototals = vppototals + sppototals
ppohandled = vppohandled + sppohandled
if vppototals > 0:
vperc = float(vppohandled) / float(vppototals) * 100.0
else:
vperc = 0.0
if sppototals > 0:
sperc = float(sppohandled) / float(sppototals) * 100.0
else:
sperc = 0.0
if ppototals > 0:
perc = float(ppohandled) / float(ppototals) * 100.0
else:
perc = 0.0
print("\n\n" + " ".ljust(28) + "violation safe-control total")
print("-" * 80)
print(
"ppos".ljust(20)
+ str(vppototals).rjust(15)
+ str(sppototals).rjust(15)
+ str(ppototals).rjust(15)
)
print(
"reported".ljust(20)
+ str(vppohandled).rjust(15)
+ str(sppohandled).rjust(15)
+ str(ppohandled).rjust(15)
)
print(
"percent reported".ljust(20)
+ str("{:.1f}".format(vperc)).rjust(15)
+ str("{:.1f}".format(sperc)).rjust(15)
+ str("{:.1f}".format(perc)).rjust(15)
)
print("-" * 80)
|
the-stack_0_18294 | from __future__ import absolute_import
import os
import logging
from lintreview.tools import Tool, run_command, process_quickfix
from lintreview.utils import in_path
log = logging.getLogger(__name__)
class Flake8(Tool):
name = 'flake8'
# see: http://flake8.readthedocs.org/en/latest/config.html
PYFLAKE_OPTIONS = [
'config',
'exclude',
'filename',
'format',
'ignore',
'max-complexity',
'max-line-length',
'select',
'snippet',
]
def check_dependencies(self):
"""
See if flake8 is on the PATH
"""
return in_path('flake8')
def match_file(self, filename):
base = os.path.basename(filename)
name, ext = os.path.splitext(base)
return ext == '.py'
def process_files(self, files):
"""
Run code checks with flake8.
Only a single process is made for all files
to save resources.
"""
log.debug('Processing %s files with %s', len(files), self.name)
command = self.make_command(files)
output = run_command(command, split=True, ignore_error=True)
if not output:
log.debug('No flake8 errors found.')
return False
process_quickfix(self.problems, output, lambda name: name)
def make_command(self, files):
command = ['flake8', '--isolated']
for option in self.options:
if option in self.PYFLAKE_OPTIONS:
command.extend([
'--%s' % option,
self.options.get(option)
])
else:
log.warning('Set non-existent flake8 option: %s', option)
command += files
return command
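# For illustration (not part of the original file): with options
# {'max-line-length': '100'} and files ['app.py'], make_command() yields
# ['flake8', '--isolated', '--max-line-length', '100', 'app.py'].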
|
the-stack_0_18296 | #!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test asmap config argument for ASN-based IP bucketing.
Verify node behaviour and debug log when launching freedomcoind in these cases:
1. `freedomcoind` with no -asmap arg, using /16 prefix for IP bucketing
2. `freedomcoind -asmap=<absolute path>`, using the unit test skeleton asmap
3. `freedomcoind -asmap=<relative path>`, using the unit test skeleton asmap
4. `freedomcoind -asmap/-asmap=` with no file specified, using the default asmap
5. `freedomcoind -asmap` with no file specified and a missing default asmap file
6. `freedomcoind -asmap` with an empty (unparsable) default asmap file
The tests are order-independent.
"""
import os
import shutil
from test_framework.test_framework import FreedomCoinTestFramework
DEFAULT_ASMAP_FILENAME = 'ip_asn.map' # defined in src/init.cpp
ASMAP = '../../src/test/data/asmap.raw' # path to unit test skeleton asmap
VERSION = 'fec61fa21a9f46f3b17bdcd660d7f4cd90b966aad3aec593c99b35f0aca15853'
def expected_messages(filename):
return ['Opened asmap file "{}" (59 bytes) from disk'.format(filename),
'Using asmap version {} for IP bucketing'.format(VERSION)]
class AsmapTest(FreedomCoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def test_without_asmap_arg(self):
self.log.info('Test freedomcoind with no -asmap arg passed')
self.stop_node(0)
with self.node.assert_debug_log(['Using /16 prefix for IP bucketing']):
self.start_node(0)
def test_asmap_with_absolute_path(self):
self.log.info('Test freedomcoind -asmap=<absolute path>')
self.stop_node(0)
filename = os.path.join(self.datadir, 'my-map-file.map')
shutil.copyfile(self.asmap_raw, filename)
with self.node.assert_debug_log(expected_messages(filename)):
self.start_node(0, ['-asmap={}'.format(filename)])
os.remove(filename)
def test_asmap_with_relative_path(self):
self.log.info('Test freedomcoind -asmap=<relative path>')
self.stop_node(0)
name = 'ASN_map'
filename = os.path.join(self.datadir, name)
shutil.copyfile(self.asmap_raw, filename)
with self.node.assert_debug_log(expected_messages(filename)):
self.start_node(0, ['-asmap={}'.format(name)])
os.remove(filename)
def test_default_asmap(self):
shutil.copyfile(self.asmap_raw, self.default_asmap)
for arg in ['-asmap', '-asmap=']:
self.log.info('Test freedomcoind {} (using default map file)'.format(arg))
self.stop_node(0)
with self.node.assert_debug_log(expected_messages(self.default_asmap)):
self.start_node(0, [arg])
os.remove(self.default_asmap)
def test_default_asmap_with_missing_file(self):
self.log.info('Test freedomcoind -asmap with missing default map file')
self.stop_node(0)
msg = "Error: Could not find asmap file \"{}\"".format(self.default_asmap)
self.nodes[0].assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
def test_empty_asmap(self):
self.log.info('Test freedomcoind -asmap with empty map file')
self.stop_node(0)
with open(self.default_asmap, "w", encoding="utf-8") as f:
f.write("")
msg = "Error: Could not parse asmap file \"{}\"".format(self.default_asmap)
self.nodes[0].assert_start_raises_init_error(extra_args=['-asmap'], expected_msg=msg)
os.remove(self.default_asmap)
def run_test(self):
self.node = self.nodes[0]
self.datadir = os.path.join(self.node.datadir, 'regtest')
self.default_asmap = os.path.join(self.datadir, DEFAULT_ASMAP_FILENAME)
self.asmap_raw = os.path.join(os.path.dirname(os.path.realpath(__file__)), ASMAP)
self.test_without_asmap_arg()
self.test_asmap_with_absolute_path()
self.test_asmap_with_relative_path()
self.test_default_asmap()
self.test_default_asmap_with_missing_file()
self.test_empty_asmap()
if __name__ == '__main__':
AsmapTest().main()
|
the-stack_0_18297 | #!/usr/bin/python3
import getopt
import json
import sys
import traceback
from datetime import datetime, timedelta
from curwmysqladapter import MySQLAdapter
def usage():
usage_text = """
Usage: ./TIDAL_TO_OUTFLOW.py [-d YYYY-MM-DD] [-h]
-h --help Show usage
    -d  --date          Model State Date in YYYY-MM-DD format. Default is current date.
    -t  --time          Model State Time in HH:MM:SS format. Default is current time.
    --start-date        Start date of the timeseries needed to run the forecast, in YYYY-MM-DD format.
                        Default is same as -d (date).
    --start-time        Start time of the timeseries needed to run the forecast, in HH:MM:SS format.
                        Default is same as -t (time).
    -T  --tag           Tag to differentiate simultaneous forecast runs, e.g. wrf1, wrf2 ...
    -f  --force         Force insert into the database. May override existing values.
-n --name Name field value of the Run table in Database.
Use time format such as 'Cloud-1-<%H:%M:%S>' to replace with time(t).
"""
print(usage_text)
def get_forecast_timeseries(my_adapter, my_event_id, my_opts):
existing_timeseries = my_adapter.retrieve_timeseries([my_event_id], my_opts)
new_timeseries = []
if len(existing_timeseries) > 0 and len(existing_timeseries[0]['timeseries']) > 0:
existing_timeseries = existing_timeseries[0]['timeseries']
for ex_step in existing_timeseries:
if ex_step[0] - ex_step[0].replace(minute=0, second=0, microsecond=0) > timedelta(minutes=30):
new_timeseries.append(
[ex_step[0].replace(minute=0, second=0, microsecond=0) + timedelta(hours=1), ex_step[1]])
else:
new_timeseries.append(
[ex_step[0].replace(minute=0, second=0, microsecond=0), ex_step[1]])
return new_timeseries
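# Note on get_forecast_timeseries: readings more than 30 minutes past the hour
# are snapped forward to the next hour and the rest are snapped back, e.g. a
# value at 10:45 is reported at 11:00 while one at 10:20 is reported at 10:00.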
f = None
try:
CONFIG = json.loads(open('CONFIG.json').read())
# print('Config :: ', CONFIG)
CSV_NUM_METADATA_LINES = 2
DAT_WIDTH = 12
TIDAL_FORECAST_ID = "ebcc2df39aea35de15cca81bc5f15baffd94bcebf3f169add1fd43ee1611d367"
CONTROL_INTERVAL = 6 * 24 * 60 # In minutes (6 day)
OUTFLOW_DAT_FILE = './FLO2D/OUTFLOW.DAT'
OUTPUT_DIR = './OUTPUT'
INIT_TIDAL_CONFIG = './Template/INITTIDAL.CONF'
MYSQL_HOST = "localhost"
MYSQL_USER = "root"
MYSQL_DB = "curw"
MYSQL_PASSWORD = ""
if 'OUTFLOW_DAT_FILE' in CONFIG:
OUTFLOW_DAT_FILE = CONFIG['OUTFLOW_DAT_FILE']
if 'OUTPUT_DIR' in CONFIG:
OUTPUT_DIR = CONFIG['OUTPUT_DIR']
if 'INIT_TIDAL_CONFIG' in CONFIG:
INIT_TIDAL_CONFIG = CONFIG['INIT_TIDAL_CONFIG']
if 'MYSQL_HOST' in CONFIG:
MYSQL_HOST = CONFIG['MYSQL_HOST']
if 'MYSQL_USER' in CONFIG:
MYSQL_USER = CONFIG['MYSQL_USER']
if 'MYSQL_DB' in CONFIG:
MYSQL_DB = CONFIG['MYSQL_DB']
if 'MYSQL_PASSWORD' in CONFIG:
MYSQL_PASSWORD = CONFIG['MYSQL_PASSWORD']
date = ''
time = ''
startDate = ''
startTime = ''
tag = ''
forceInsert = False
runName = 'Cloud-1'
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:t:T:fn:", [
"help", "date=", "time=", "start-date=", "start-time=", "tag=", "force", "runName="
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt in ("-d", "--date"):
date = arg
elif opt in ("-t", "--time"):
time = arg
elif opt in "--start-date":
startDate = arg
elif opt in "--start-time":
startTime = arg
elif opt in ("-T", "--tag"):
tag = arg
elif opt in ("-f", "--force"):
forceInsert = True
elif opt in ("-n", "--name"):
runName = arg
# Default run for current day
modelState = datetime.now()
if date:
modelState = datetime.strptime(date, '%Y-%m-%d')
date = modelState.strftime("%Y-%m-%d")
if time:
modelState = datetime.strptime('%s %s' % (date, time), '%Y-%m-%d %H:%M:%S')
time = modelState.strftime("%H:%M:%S")
startDateTime = datetime.now()
if startDate:
startDateTime = datetime.strptime(startDate, '%Y-%m-%d')
else:
startDateTime = datetime.strptime(date, '%Y-%m-%d')
startDate = startDateTime.strftime("%Y-%m-%d")
if startTime:
startDateTime = datetime.strptime('%s %s' % (startDate, startTime), '%Y-%m-%d %H:%M:%S')
startTime = startDateTime.strftime("%H:%M:%S")
print('TIDAL_TO_OUTFLOW startTime:', datetime.now().strftime("%Y-%m-%d %H:%M:%S"), tag)
print(' TIDAL_TO_OUTFLOW run for', date, '@', time, tag)
print(' With Custom starting', startDate, '@', startTime, ' run name:', runName)
# Get Observed Data
adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB)
opts = {
'from': (startDateTime - timedelta(minutes=0)).strftime("%Y-%m-%d %H:%M:%S"),
'to': (startDateTime + timedelta(minutes=CONTROL_INTERVAL)).strftime("%Y-%m-%d %H:%M:%S"),
}
tidal_timeseries = get_forecast_timeseries(adapter, TIDAL_FORECAST_ID, opts)
if len(tidal_timeseries) > 0:
print('tidal_timeseries::', len(tidal_timeseries), tidal_timeseries[0], tidal_timeseries[-1])
else:
print('No data found for tidal timeseries: ', tidal_timeseries)
sys.exit(1)
fileName = OUTFLOW_DAT_FILE.rsplit('.', 1)
OUTFLOW_DAT_FILE_PATH = '{name}{tag}.{extension}'.\
format(name=fileName[0], tag='.' + tag if tag else '', extension=fileName[1])
print('Open FLO2D OUTFLOW ::', OUTFLOW_DAT_FILE_PATH)
f = open(OUTFLOW_DAT_FILE_PATH, 'w')
lines = []
print('Reading INIT TIDAL CONF...')
with open(INIT_TIDAL_CONFIG) as initTidalConfFile:
initTidalLevels = initTidalConfFile.readlines()
for initTidalLevel in initTidalLevels:
if len(initTidalLevel.split()): # Check if not empty line
lines.append(initTidalLevel)
if initTidalLevel[0] == 'N':
lines.append('{0} {1:{w}} {2:{w}}\n'.format('S', 0, 0, w=DAT_WIDTH))
base_date_time = startDateTime.replace(minute=0, second=0, microsecond=0)
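    # Each forecast step becomes one 'S' record: the hours elapsed since the
    # start of the series followed by the tidal level (two decimals), each
    # right-aligned to DAT_WIDTH columns as expected by FLO2D's OUTFLOW.DAT.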
for step in tidal_timeseries:
hours_so_far = (step[0] - base_date_time)
hours_so_far = 24 * hours_so_far.days + hours_so_far.seconds / (60 * 60)
lines.append('{0} {1:{w}} {2:{w}{b}}\n'
.format('S', int(hours_so_far), float(step[1]), b='.2f', w=DAT_WIDTH))
f.writelines(lines)
print('Finished writing OUTFLOW.DAT')
except Exception as e:
print(e)
traceback.print_exc()
finally:
if f:
f.close()
|
the-stack_0_18298 | import logging
import multiprocessing
import sys
import threading
from multiprocessing import process
import socket
import time
import random
# Test function
def make_socket(ip='localhost', port=2021,boarding=False):
name = process.current_process().name
logging.info(f'Process starts: name = {name}')
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
address = (ip,port)
if boarding == True:
logging.info(f'{name}: start to send')
else:
s.bind(address)
logging.info(f'{name}: start to bind socket {s} to {ip}/{port}')
with s: # open and close
while True:
if boarding == True:
logging.info(f'{name}: Sending...')
data_send = b'Hello UDP'
s.sendto(data_send,address)
time.sleep(1)
else:
data_recv, addr = s.recvfrom(1024)
logging.info(f'{name}: Receiving from {addr} = {data_recv}')
time.sleep(1)
def main():
name = process.current_process().name
logging.info(f'Running {name} as {__name__}')
# ip = 'localhost'
# port = 24050
# broadcaster = make_socket(ip,port,boarding=True)
# make it in process
broadcaster = multiprocessing.Process(target=make_socket,kwargs={'boarding':True},daemon=True,name='Boardcaster')
listener = multiprocessing.Process(target=make_socket, kwargs={'boarding': False},daemon=True,name='Listener')
broadcaster.start()
listener.start()
logging.info('Do something on the main thread')
timer = threading.Timer(25,sys.exit,[0])
timer.start()
logging.info(f'Finished {name}')
# Configure logging at module level so that worker processes which re-import this module get the same settings.
logging.basicConfig(format='%(levelname)s - %(asctime)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
if __name__ == '__main__':
main()
|
the-stack_0_18299 | # -*- coding: utf-8 -*-
def f(k, a, x):
count = 0
for ai in a:
count += max(ai - x, 0)
return count <= k
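# f(k, a, x) asks whether capping every element of `a` at `x` removes at most
# `k` units in total; this is monotone in x, so main() can binary search the
# smallest feasible cap `ok`, and the leftover k - summed operations each
# contribute `ok` to the answer.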
def main():
import sys
input = sys.stdin.readline
n, k = map(int, input().split())
a = list(map(int, input().split()))
ng = -1
ok = 2 * 10 ** 9 + 1
while ok - ng > 1:
mid = (ok + ng) // 2
if f(k, a, mid):
ok = mid
else:
ng = mid
summed = 0
ans = 0
for ai in a:
count = max(0, ai - ok)
if count > 0:
ans += (ai * (ai + 1) // 2) - (ok * (ok + 1) // 2)
summed += count
ans += (k - summed) * ok
print(ans)
if __name__ == "__main__":
main()
|
the-stack_0_18300 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF metrics that work in the multi-agent case."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import tf_driver
from tf_agents.metrics import tf_metric
from tf_agents.metrics.tf_metrics import TFDeque
from tf_agents.utils import common
def zero_out_new_episodes(trajectory, return_accumulator):
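  # Wherever the trajectory marks the first step of a new episode, reset the
  # accumulated return to zero; otherwise keep the running value.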
return tf.where(trajectory.is_first(), tf.zeros_like(return_accumulator),
return_accumulator)
@gin.configurable
class AverageReturnMetric(tf_metric.TFStepMetric):
"""Metric for the average collective return and individual agent returns."""
def __init__(self,
n_agents,
name='MultiagentAverageReturn',
prefix='Metrics',
dtype=tf.float32,
batch_size=1,
buffer_size=10):
super(AverageReturnMetric, self).__init__(name=name, prefix=prefix)
self.n_agents = n_agents
self._dtype = dtype
# Accumulator and buffer for the average return of all agents
self._collective_return_accumulator = common.create_variable(
initial_value=0, dtype=dtype, shape=(batch_size,), name='Accumulator')
self._collective_buffer = TFDeque(buffer_size, dtype)
# Accumulators for each agent's independent reward
self._agent_return_accumulators = []
for a in range(n_agents):
self._agent_return_accumulators.append(common.create_variable(
initial_value=0, dtype=dtype, shape=(batch_size,),
name='Accumulator' + str(a)))
# Buffers for each agent's independent reward
self._agent_buffers = []
for a in range(n_agents):
self._agent_buffers.append(TFDeque(buffer_size, dtype))
@common.function(autograph=True)
def call(self, trajectory):
# Zero out batch indices where a new episode is starting.
self._collective_return_accumulator.assign(
zero_out_new_episodes(trajectory, self._collective_return_accumulator))
for a in range(self.n_agents):
self._agent_return_accumulators[a].assign(
zero_out_new_episodes(trajectory, self._agent_return_accumulators[a]))
# Note that trajectory.reward has shape (batch, n_agents)
# Update accumulator with sum of received rewards.
self._collective_return_accumulator.assign_add(
tf.reduce_mean(trajectory.reward, axis=1))
# Pull out data for each agent and assign
for a in range(self.n_agents):
self._agent_return_accumulators[a].assign_add(trajectory.reward[:, a])
# Add final returns to buffer.
last_episode_indices = tf.squeeze(tf.where(trajectory.is_last()), axis=-1)
for indx in last_episode_indices:
self._collective_buffer.add(self._collective_return_accumulator[indx])
# Agent buffers that use the global done
for a in range(self.n_agents):
self._agent_buffers[a].add(self._agent_return_accumulators[a][indx])
return trajectory
def result(self):
return self._collective_buffer.mean()
def result_for_agent(self, agent_id):
return self._agent_buffers[agent_id].mean()
@common.function
def reset(self):
self._collective_buffer.clear()
self._collective_return_accumulator.assign(
tf.zeros_like(self._collective_return_accumulator))
for a in range(self.n_agents):
self._agent_buffers[a].clear()
self._agent_return_accumulators[a].assign(
tf.zeros_like(self._agent_return_accumulators[a]))
def tf_summaries(self, train_step=None, step_metrics=()):
"""Generates summaries for all agents & collective summary against steps.
Args:
train_step: (Optional) Step counter for training iterations. If None, no
metric is generated against the global step.
step_metrics: (Optional) Iterable of step metrics to generate summaries
against.
Returns:
A list of summaries.
"""
summaries = super(AverageReturnMetric, self).tf_summaries(
train_step=train_step, step_metrics=step_metrics)
for a in range(self.n_agents):
summaries.extend(self.single_agent_summary(
a, train_step, step_metrics))
return summaries
def single_agent_summary(self, agent_id, train_step=None, step_metrics=()):
summaries = []
prefix = self._prefix
name = self.name + '_agent' + str(agent_id)
tag = common.join_scope(prefix, name)
result = self.result_for_agent(agent_id)
if train_step is not None:
summaries.append(
tf.compat.v2.summary.scalar(name=tag, data=result, step=train_step))
if prefix:
prefix += '_'
for step_metric in step_metrics:
# Skip plotting the metrics against itself.
if self.name == step_metric.name:
continue
step_tag = '{}vs_{}/{}'.format(prefix, step_metric.name, name)
# Summaries expect the step value to be an int64.
step = tf.cast(step_metric.result(), tf.int64)
summaries.append(tf.compat.v2.summary.scalar(
name=step_tag,
data=result,
step=step))
return summaries
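# Hypothetical usage sketch (not in the original file): the metric is meant to
# be passed as an observer to a driver, with per-agent results read back via
# result_for_agent, e.g.
#
#   metric = AverageReturnMetric(n_agents=3, batch_size=env.batch_size)
#   driver = tf_driver.TFDriver(env, policy, observers=[metric], max_episodes=10)
#   driver.run(env.reset(), policy.get_initial_state(env.batch_size))
#   collective = metric.result()
#   agent0 = metric.result_for_agent(0)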
def log_metrics(metrics, prefix=''):
log = []
for m in metrics:
log.append('{0} = {1}'.format(m.name, m.result()))
if 'Multiagent' in m.name:
log += ['{0} = {1}'.format(
m.name + '_agent' + str(a),
m.result_for_agent(a)) for a in range(m.n_agents)]
logging.info('%s \n\t\t %s', prefix, '\n\t\t '.join(log))
@gin.configurable
def eager_compute(metrics,
environment,
policy,
num_episodes=1,
train_step=None,
summary_writer=None,
summary_prefix='',
use_function=True):
"""Compute metrics using `policy` on the `environment`.
*NOTE*: Because placeholders are not compatible with Eager mode we can not use
python policies. Because we use tf_policies we need the environment time_steps
to be tensors making it easier to use a tf_env for evaluations. Otherwise this
method mirrors `compute` directly.
Args:
metrics: List of metrics to compute.
environment: tf_environment instance.
policy: tf_policy instance used to step the environment.
num_episodes: Number of episodes to compute the metrics over.
train_step: An optional step to write summaries against.
summary_writer: An optional writer for generating metric summaries.
summary_prefix: An optional prefix scope for metric summaries.
use_function: Option to enable use of `tf.function` when collecting the
metrics.
Returns:
A dictionary of results {metric_name: metric_value}
"""
for metric in metrics:
metric.reset()
multiagent_metrics = [m for m in metrics if 'Multiagent' in m.name]
driver = tf_driver.TFDriver(
environment,
policy,
observers=metrics,
max_episodes=num_episodes,
disable_tf_function=not use_function
)
def run_driver():
time_step = environment.reset()
policy_state = policy.get_initial_state(environment.batch_size)
driver.run(time_step, policy_state)
if use_function:
common.function(run_driver)()
else:
run_driver()
results = [(metric.name, metric.result()) for metric in metrics]
for m in multiagent_metrics:
for a in range(m.n_agents):
results.append((m.name + '_agent' + str(a), m.result_for_agent(a)))
# TODO(b/120301678) remove the summaries and merge with compute
if train_step and summary_writer:
with summary_writer.as_default():
for m in metrics:
tag = common.join_scope(summary_prefix, m.name)
tf.compat.v2.summary.scalar(name=tag, data=m.result(), step=train_step)
if 'Multiagent' in m.name:
for a in range(m.n_agents):
tf.compat.v2.summary.scalar(name=tag + '_agent' + str(a),
data=m.result_for_agent(a),
step=train_step)
# TODO(b/130249101): Add an option to log metrics.
return collections.OrderedDict(results)
class MultiagentMetricsGroup(tf.Module):
"""Group a list of Metrics into a container."""
def __init__(self, metrics, name=None):
super(MultiagentMetricsGroup, self).__init__(name=name)
self.metrics = metrics
self.multiagent_metrics = [m for m in metrics if 'Multiagent' in m.name]
def results(self):
results = [(metric.name, metric.result()) for metric in self.metrics]
for m in self.multiagent_metrics:
for a in range(m.n_agents):
results.append((m.name + '_agent' + str(a), m.result_for_agent(a)))
return collections.OrderedDict(results)
|
the-stack_0_18302 | """
This is taken from https://github.com/saxix/django-adminactions/blob/develop/adminactions/utils.py
With minor modifications to make it python3 compatible.
"""
import six
from django.db import models
def get_attr(obj, attr, default=None):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
"""
if '.' not in attr:
ret = getattr(obj, attr, default)
else:
L = attr.split('.')
ret = get_attr(getattr(obj, L[0], default), '.'.join(L[1:]), default)
if isinstance(ret, BaseException):
raise ret
return ret
def getattr_or_item(obj, name):
try:
ret = get_attr(obj, name, AttributeError())
except AttributeError:
try:
ret = obj[name]
except KeyError:
raise AttributeError("%s object has no attribute/item '%s'" % (obj.__class__.__name__, name))
return ret
def get_field_value(obj, field, usedisplay=True, raw_callable=False):
"""
returns the field value or field representation if get_FIELD_display exists
:param obj: :class:`django.db.models.Model` instance
:param field: :class:`django.db.models.Field` instance or ``basestring`` fieldname
:param usedisplay: boolean if True return the get_FIELD_display() result
:return: field value
>>> from django.contrib.auth.models import Permission
>>> p = Permission(name='perm')
    >>> print(get_field_value(p, 'name'))
perm
"""
if isinstance(field, six.string_types):
fieldname = field
elif isinstance(field, models.Field):
fieldname = field.name
else:
raise ValueError('Invalid value for parameter `field`: Should be a field name or a Field instance ')
if usedisplay and hasattr(obj, 'get_%s_display' % fieldname):
value = getattr(obj, 'get_%s_display' % fieldname)()
else:
value = getattr_or_item(obj, fieldname)
if not raw_callable and callable(value):
return value()
return value
|
the-stack_0_18303 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0002_domain_domain'),
]
operations = [
migrations.AlterField(
model_name='domain',
name='domain',
field=models.ForeignKey(related_name='domain_domain', default=1, to='common.Domain'),
preserve_default=True,
),
]
|
the-stack_0_18305 | # SimpleCV Color Model Library
#load required libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import *
class ColorModel:
"""
**SUMMARY**
The color model is used to model the color of foreground and background objects
    by using a training set of images.
    You can create the color model with any number of "training" images, or
    add images to the model with add() and remove(). Then for your data images,
    you can use threshold() to return a segmented picture.
"""
#TODO: Discretize the colorspace into smaller intervals,eg r=[0-7][8-15] etc
#TODO: Work in HSV space
mIsBackground = True
mData = {}
mBits = 1
def __init__(self, data = None, isBackground=True):
self.mIsBackground = isBackground
self.mData = {}
self.mBits = 1
if data:
try:
[ self.add(d) for d in data ]
except TypeError:
self.add(data)
def _makeCanonical(self, data):
"""
        Turn input types into a common form used by the rest of the class -- a
        dict whose keys are the unique colors, bit-shifted right by mBits and encoded as strings
"""
ret = ''
#first cast everything to a numpy array
if(data.__class__.__name__ == 'Image'):
ret = data.getNumpy().reshape(-1, 3)
elif(data.__class__.__name__ == 'cvmat'):
ret = np.array(data).reshape(-1, 3)
elif(data.__class__.__name__ == 'list' ):
temp = []
for d in data: #do the bgr conversion
t = (d[2],d[1],d[0])
temp.append(t)
ret = np.array(temp,dtype='uint8')
elif (data.__class__.__name__=='tuple'):
ret = np.array((data[2],data[1],data[0]),'uint8')
        elif(data.__class__.__name__=='ndarray'):
ret = data
else:
logger.warning("ColorModel: color is not in an accepted format!")
return None
        rs = np.right_shift(ret, self.mBits) #right shift by mBits bits
if( len(rs.shape) > 1 ):
uniques = np.unique(rs.view([('',rs.dtype)]*rs.shape[1])).view(rs.dtype).reshape(-1, 3)
else:
uniques = [rs]
#create a unique set of colors. I had to look this one up
#create a dict of encoded strings
return dict.fromkeys(map(np.ndarray.tostring, uniques), 1)
def reset(self):
"""
**SUMMARY**
        Resets the color model, i.e. clears out the stored values.
        **RETURNS**
        Nothing.
        **EXAMPLE**
        >>> cm = ColorModel()
        >>> cm.add(Image("lenna"))
        >>> cm.reset()
"""
self.mData = {}
def add(self, data):
"""
**SUMMARY**
Add an image, array, or tuple to the color model.
**PARAMETERS**
        * *data* - An image, array, or tuple of values to add to the color model.
        **RETURNS**
        Nothing.
        **EXAMPLE**
        >>> cm = ColorModel()
        >>> cm.add(Image("lenna"))
        >>> cm.reset()
"""
self.mData.update(self._makeCanonical(data))
def remove(self, data):
"""
**SUMMARY**
Remove an image, array, or tuple from the model.
**PARAMETERS**
        * *data* - An image, array, or tuple of values.
        **RETURNS**
        Nothing.
        **EXAMPLE**
        >>> cm = ColorModel()
        >>> cm.add(Image("lenna"))
>>> cm.remove(Color.BLACK)
"""
self.mData = dict.fromkeys(set(self.mData) ^ set(self._makeCanonical(data)), 1)
def threshold(self, img):
"""
**SUMMARY**
Perform a threshold operation on the given image. This involves iterating
over the image and comparing each pixel to the model. If the pixel is in the
model it is set to be either the foreground (white) or background (black) based
on the setting of mIsBackground.
**PARAMETERS**
* *img* - the image to perform the threshold on.
**RETURNS**
The thresholded image.
**EXAMPLE**
>>> cm = ColorModel()
        >>> cm.add(Color.RED)
        >>> cm.add(Color.BLUE)
        >>> result = cm.threshold(Image("lenna"))
>>> result.show()
"""
a = 0
b = 255
if( self.mIsBackground == False ):
a = 255
b = 0
rs = np.right_shift(img.getNumpy(), self.mBits).reshape(-1, 3) #bitshift down and reshape to Nx3
mapped = np.array(map(self.mData.has_key, map(np.ndarray.tostring, rs))) #map to True/False based on the model
thresh = np.where(mapped, a, b) #replace True and False with fg and bg
return Image(thresh.reshape(img.width, img.height))
def contains(self, c):
"""
**SUMMARY**
Return true if a particular color is in our color model.
**PARAMETERS**
        * *c* - A three value color tuple.
        **RETURNS**
        Returns True if the color is in the model, False otherwise.
        **EXAMPLE**
        >>> cm = ColorModel()
        >>> cm.add(Color.RED)
        >>> cm.add(Color.BLUE)
        >>> if( cm.contains(Color.RED) ):
>>> print "Yo - we gots red y'all."
"""
#reverse the color, cast to uint8, right shift, convert to string, check dict
return self.mData.has_key(np.right_shift(np.cast['uint8'](c[::-1]), self.mBits).tostring())
def setIsForeground(self):
"""
**SUMMARY**
Set our model as being foreground imagery. I.e. things in the model are the foreground
        and will be marked as white during the threshold operation.
        **RETURNS**
        Nothing.
        """
        self.mIsBackground = False
def setIsBackground(self):
"""
**SUMMARY**
Set our model as being background imagery. I.e. things in the model are the background
        and will be marked as black during the threshold operation.
        **RETURNS**
        Nothing.
        """
        self.mIsBackground = True
def load(self, filename):
"""
**SUMMARY**
Load the color model from the specified file.
**TO DO**
This should be converted to pickle.
**PARAMETERS**
* *filename* - The file name and path to load the data from.
**RETURNS**
Nothing.
**EXAMPLE**
>>> cm = ColorModel()
>>> cm.load("myColors.txt")
>>> cm.add(Color.RED)
>>> cm.add(Color.BLUE)
>>> cm.save("mymodel)
"""
self.mData = load(open(filename))
def save(self, filename):
"""
**SUMMARY**
Save a color model file.
**PARAMETERS**
* *filename* - The file name and path to save the data to.
**RETURNS**
Nothing.
**EXAMPLE**
>>> cm = ColorModel()
>>> cm.add(Color.RED)
>>> cm.add(Color.BLUE)
>>> cm.save("mymodel.txt")
**TO DO**
This should be converted to pickle.
"""
dump(self.mData, open(filename, "wb"))
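# Rough usage sketch (illustrative only; follows the method docstrings above):
#
#   cm = ColorModel(isBackground=False)        # model the foreground colors
#   cm.add(Image("train.png"))                 # accumulate colors from training data
#   mask = cm.threshold(Image("scene.png"))    # white where the model matches
#   cm.save("mymodel.txt")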
|
the-stack_0_18306 | import re
example = "".join(open("example.txt").readlines())
puzzle = "".join(open("puzzle.txt").readlines())
problem_input = puzzle
def parse_group(input: str):
votes = input.split("\n")
votes_map = dict()
for vote in votes:
for el in vote:
if el not in votes_map:
votes_map[el] = 0
votes_map[el] += 1
return votes_map, len(votes)
def part_a():
groups = problem_input.split("\n\n")
sum = 0
for g in groups:
parsed, votes = parse_group(g)
sum += len(parsed)
return sum
def part_b():
groups = problem_input.split("\n\n")
sum = 0
for g in groups:
parsed, votes = parse_group(g)
for i in parsed:
if parsed[i] == votes:
sum += 1
return sum
print("part_a: %d" % part_a())
print("part_b: %d" % part_b())
|
the-stack_0_18308 |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2010, Frank Scholz <[email protected]>
class WANCommonInterfaceConfigClient:
def __init__(self, service):
self.service = service
self.namespace = service.get_type()
self.url = service.get_control_url()
self.service.subscribe()
self.service.client = self
def remove(self):
if self.service != None:
self.service.remove()
self.service = None
self.namespace = None
self.url = None
del self
def subscribe_for_variable(self, var_name, callback, signal=False):
self.service.subscribe_for_variable(var_name, instance=0, callback=callback, signal=signal)
|
the-stack_0_18310 | #!/usr/bin/python
import wireless
from wifi import Cell # get all the networks
from scapy.all import *
'''
RadioTap BlogPost: http://wifinigel.blogspot.com/2013/11/what-are-radiotap-headers.html
'''
def jam(address): # addresses to jam
conf.iface = "wlp3s0"
bssid = address
client = "FF:FF:FF:FF:FF:FF" # broadcasting MAC to all the clients - disconnecting from the network
count = 3 # sending the Deauth packet
conf.verb = 0 # for scapy
# generating a packet (RadioTap injection on the 802.11 header)
packet = RadioTap()/Dot11(type=0, subtype=12, addr1=client, addr2=bssid, addr3=bssid)/Dot11Deauth(reason=7)
for n in range(int(count)):
sendp(packet) # sending generated packet
print("DeAuth Num: %s Sent via: %s to BSSID: %s for Client: %s" % (str(n), conf.iface, bssid, client))
if __name__ == "__main__":
# Wireless object
wifi1 = wireless.Wireless()
# selecting interface
iface = wifi1.interface()
print(iface)
# getting all network informations
all_networks = Cell.all(iface)
# adding address (future)
bssids = []
# displaying information
for wifi in all_networks:
print("Network name (Ssid): %s" % wifi.ssid)
print("Network address (Bssid): %s" % wifi.address)
print("Network channel: %s" % str(wifi.channel))
print("Network quality: %s" % str(wifi.quality))
# appending address to list
bssids.append(wifi.address)
# jamming process
while True:
for bssid in bssids:
print("Jamming on %s" % bssid)
jam(bssid)
|
the-stack_0_18311 | import sys
import os
import json
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from data import TranslationDataset
from transformers import BertTokenizerFast, BertTokenizer
from transformers import BertModel, BertForMaskedLM, BertConfig, EncoderDecoderModel
import math
# Identify the config file
if len(sys.argv) < 2:
print("No config file specified. Using the default config.")
configfile = "config.json"
else:
configfile = sys.argv[1]
# Read the params
with open(configfile, "r") as f:
config = json.load(f)
globalparams = config["global_params"]
modelparams = config["model_params"]
# Load the tokenizers
en_tokenizer = BertTokenizer.from_pretrained(globalparams["tokenizer_path"])
de_tokenizer = BertTokenizer.from_pretrained(globalparams["tokenizer_path"])
# Init the dataset
train_en_file = globalparams["train_en_file"]
train_de_file = globalparams["train_de_file"]
valid_en_file = globalparams["valid_en_file"]
valid_de_file = globalparams["valid_de_file"]
test_en_file = globalparams["test_en_file"]
test_de_file = globalparams["test_de_file"]
batch_size = modelparams["batch_size"]
train_dataset = TranslationDataset(train_en_file, train_de_file, en_tokenizer, de_tokenizer)
train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False, \
drop_last=True, num_workers=1, collate_fn=train_dataset.collate_function)
valid_dataset = TranslationDataset(valid_en_file, valid_de_file, en_tokenizer, de_tokenizer)
valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False, \
drop_last=True, num_workers=1, collate_fn=valid_dataset.collate_function)
test_dataset = TranslationDataset(test_en_file, test_de_file, en_tokenizer, de_tokenizer)
test_dataloader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, \
drop_last=True, num_workers=1, collate_fn=test_dataset.collate_function)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print("Using device:", device)
def count_parameters(mdl):
return sum(p.numel() for p in mdl.parameters() if p.requires_grad)
def compute_loss(predictions, targets, criterion, perplexity=False):
"""Compute our custom loss"""
#print("Compute loss: ")
#print("inputs, preds, targets", predictions.shape, targets.shape)
    # Teacher-forcing shift: the prediction at position t is scored against the
    # target token at position t+1, so drop the last prediction and the first target.
    predictions = predictions[:, :-1, :].contiguous()
    targets = targets[:, 1:]
#print("preds, targets", predictions.shape, targets.shape)
rearranged_output = predictions.view(predictions.shape[0]*predictions.shape[1], -1)
rearranged_target = targets.contiguous().view(-1)
#print(rearranged_output.shape, rearranged_target.shape)
#print(rearranged_target)
loss = criterion(rearranged_output, rearranged_target)
if(not perplexity):
#means that criterion passed in mean reduction, and currently training is going on.
return loss
else:
#eval mode is going on...criterion has sum reduction currently.
return loss, (rearranged_target != 0).sum()
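def _compute_loss_shape_check():
    # Hedged sanity sketch (never called during training): checks the
    # teacher-forcing shift in compute_loss on random tensors. The batch,
    # sequence and vocabulary sizes below are illustrative assumptions.
    check_criterion = nn.NLLLoss(ignore_index=0)
    preds = F.log_softmax(torch.randn(2, 5, 11), dim=2)  # (batch, seq_len, vocab)
    targets = torch.randint(1, 11, (2, 5))               # (batch, seq_len)
    return compute_loss(preds, targets, check_criterion)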
def train_model(model, optimizer, criterion):
model.train()
epoch_loss = 0
for i, (en_input, en_masks, de_output, de_masks) in enumerate(train_dataloader):
optimizer.zero_grad()
en_input = en_input.to(device)
de_output = de_output.to(device)
en_masks = en_masks.to(device)
de_masks = de_masks.to(device)
#print(en_input.shape, de_output.shape, en_masks.shape, de_masks.shape)
labels = de_output.clone()
out = model(input_ids=en_input, attention_mask=en_masks,
decoder_input_ids=de_output, decoder_attention_mask=de_masks, labels=labels)
#print(len(out))
#print(out[0].shape)
#print(out[1].shape)
prediction_scores = out[1]
#print("pred scores: ", prediction_scores.shape)
predictions = F.log_softmax(prediction_scores, dim=2)
#print("predictions: ", predictions.shape, predictions)
#print("output: ", de_output.shape, de_output)
loss = compute_loss(predictions, de_output, criterion)
#print(loss)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
epoch_loss += loss.item()
print("Mean train loss:", (epoch_loss / len(train_dataloader)))
def eval_model(model, criterion, datatype="valid", perplexity=False):
dataloader = None
if(datatype == "train"):
dataloader = train_dataloader
elif(datatype == "valid"):
dataloader = valid_dataloader
elif(datatype == "test"):
dataloader = test_dataloader
else:
raise AssertionError
model.eval()
if(perplexity):
loss_sum = 0.0
count_eles = 0
else:
epoch_loss = 0
for i, (en_input, en_masks, de_output, de_masks) in enumerate(dataloader):
en_input = en_input.to(device)
de_output = de_output.to(device)
en_masks = en_masks.to(device)
de_masks = de_masks.to(device)
labels = de_output.clone()
with torch.no_grad():
out = model(input_ids=en_input, attention_mask=en_masks,
decoder_input_ids=de_output, decoder_attention_mask=de_masks, labels=labels)
prediction_scores = out[1]
predictions = F.log_softmax(prediction_scores, dim=2)
if(perplexity):
loss, eles = compute_loss(predictions, de_output, criterion, perplexity=perplexity)
loss_sum += loss.item()
count_eles += eles.item()
else:
loss = compute_loss(predictions, de_output, criterion, perplexity=perplexity)
epoch_loss += loss.item()
if(perplexity):
#eval mode.
mean_nll = loss_sum / count_eles
ppl = math.exp(mean_nll)
print("Perplexity: ", datatype, ppl)
else:
#training going on
print("Mean loss", datatype, (epoch_loss / len(dataloader)))
if(globalparams["do_train"]):
#load model from pretrained/scratch and train it/save it in the provided dir.
print("TRAIN MODE: ")
if(globalparams["pretrained"]):
#load pretrained encoder and pretrained decoder.
model = EncoderDecoderModel.from_encoder_decoder_pretrained(globalparams['pretrained_path'], globalparams['pretrained_path'])
print("pretrained model loaded.", globalparams["pretrained_path"])
    else:
        # Training from scratch (without pretrained weights) is not implemented
        # in this script; model stays undefined unless "pretrained" is set.
        pass
model.to(device)
print(f'The model has {count_parameters(model):,} trainable parameters')
optimizer = optim.Adam(model.parameters(), lr=modelparams['lr'])
criterion = nn.NLLLoss(ignore_index=de_tokenizer.pad_token_id)
num_train_batches = len(train_dataloader)
num_valid_batches = len(valid_dataloader)
print("num batches: ", num_train_batches, num_valid_batches)
# MAIN TRAINING LOOP
for epoch in range(modelparams['num_epochs']):
print("Starting epoch", epoch+1)
train_model(model, optimizer, criterion)
eval_model(model, criterion)
print("Saving model ..")
save_location = modelparams['model_path']
if not os.path.exists(save_location):
os.makedirs(save_location)
model.save_pretrained(save_location)
if(globalparams["do_eval"]):
#load the trained encoder decoder model from the provided dir and then evaluate it on all three datasets on perplexity.
print("EVAL MODE: ")
model = EncoderDecoderModel.from_pretrained(modelparams['model_path'])
print("Trained EncDec Model loaded: ", modelparams["model_path"])
model.to(device)
criterion = nn.NLLLoss(ignore_index=de_tokenizer.pad_token_id, reduction='sum')
#TODO evaluate perplexity on each.
eval_model(model, criterion, datatype="train", perplexity=True)
eval_model(model, criterion, datatype="valid", perplexity=True)
eval_model(model, criterion, datatype="test", perplexity=True) |
the-stack_0_18313 | # coding: utf-8
###
# @file trainer.py
# @author Anton Ragot <[email protected]>, Jérémy Plassmann <[email protected]>
#
# @section LICENSE
#
# MIT License
#
# Copyright (c) 2020 Distributed Computing Laboratory, EPFL
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###
#!/usr/bin/env python
import argparse
from network import Network
from libs.Worker import Worker
from libs.PS import PS
from libs.ByzWorker import ByzWorker
from libs import tools
from aggregator_tf.aggregator import Aggregator_tf
import time
import os
import sys
# Allowing visualization of the log while the process is running over ssh
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1)
FLAGS = None
def main():
n_ps = Network(FLAGS.config_ps)
n_w = Network(FLAGS.config_w)
p = PS(n_ps, FLAGS.log, FLAGS.dataset, FLAGS.model, FLAGS.batch_size, FLAGS.nbbyzwrks)
p.start()
if n_w.get_my_attack() != 'None':
w = ByzWorker(n_w, FLAGS.log, FLAGS.dataset, FLAGS.model, FLAGS.batch_size, FLAGS.nbbyzwrks)
else:
w = Worker(n_w, FLAGS.log, FLAGS.dataset, FLAGS.model, FLAGS.batch_size, FLAGS.nbbyzwrks)
w.start()
model_aggregator = Aggregator_tf(n_ps.get_model_strategy(), len(n_w.get_all_workers()), FLAGS.nbbyzwrks)
gradient_aggregator = Aggregator_tf(n_ps.get_gradient_strategy(), len(n_ps.get_all_workers()), FLAGS.nbbyzwrks)
accuracy = 0
for iter in range(FLAGS.max_iter):
models = w.get_models(iter)
aggregated_model = model_aggregator.aggregate(models)
w.write_model(aggregated_model)
p.write_model(aggregated_model)
loss, grads = w.compute_gradients(iter)
w.commit_gradients(grads)
gradients = p.get_gradients(iter)
aggregated_gradient = gradient_aggregator.aggregate(gradients)
model = p.upate_model(aggregated_gradient)
p.commit_model(model)
tools.training_progression(FLAGS.max_iter, iter, accuracy)
if iter % 200 == 0:
accuracy = p.compute_accuracy()
print("Waiting for termination...")
p.wait_until_termination()
w.wait_until_termination()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
# Flags for defining current Node
parser.add_argument('--config_w',
type=str,
default="TF_CONFIG",
help='Config file location.')
parser.add_argument('--config_ps',
type=str,
default="TF_CONFIG",
help='Config file location.')
parser.add_argument('--log',
type=bool,
default=False,
help='Add flag to print intermediary steps.')
parser.add_argument('--max_iter',
type=int,
default="2000",
help='Maximum number of epoch')
parser.add_argument('--dataset',
type=str,
default="mnist",
help='Choose the dataset to use')
parser.add_argument('--model',
type=str,
default="Small",
help='Choose the model to use')
parser.add_argument('--batch_size',
type=int,
default=128,
help='Set the batch size')
parser.add_argument('--nbbyzwrks',
type=int,
default=0,
help='Set the number of byzantine workers (necessary for Krum aggregation)')
FLAGS, unparsed = parser.parse_known_args()
main()
|
the-stack_0_18314 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Usage:
% py.test test.py
"""
import os
import pytest
import numpy as np
import tempfile
from Ska.DBI import DBI
# If the SYBASE_OCS environment variable is set (from flt_envs) and the module exists
# on the system, do the Sybase tests.
HAS_SYBASE = ('SYBASE_OCS' in os.environ and
os.path.exists(
os.path.join(os.environ['SYBASE'],
os.environ['SYBASE_OCS'],
'python', 'python34_64r', 'lib', 'sybpydb.so')))
with open(os.path.join(os.path.dirname(__file__), 'ska_dbi_test_table.sql')) as fh:
TEST_TABLE_SQL = fh.read().strip()
class DBI_BaseTests(object):
def setup_class(cls):
if cls.db_config['dbi'] == 'sqlite':
cls.tmpdir = tempfile.TemporaryDirectory()
cls.db_config['server'] = os.path.join(cls.tmpdir.name, 'sqlite3.db3')
cls.db = DBI(**cls.db_config)
def teardown_class(cls):
# No matter what try to drop the testing table. Normally this should
# fail as a result of test_55.
try:
cls.db.execute('drop table ska_dbi_test_table')
except:
pass
cls.db.cursor.close()
cls.db.conn.close()
def test_05_force_drop_table(self):
try:
self.db.execute('drop table ska_dbi_test_table')
except self.db.Error:
pass
def test_10_create_table(self):
# Test execute with multiple cmds separated by ';\n'
self.db.execute(TEST_TABLE_SQL)
def test_15_insert_data(self):
for id_ in range(3):
data = dict(id=id_, tstart=2. + id_, tstop=3. + id_, obsid=4 + id_,
pcad_mode='npnt', aspect_mode='kalm', sim_mode='stop')
self.db.insert(data, 'ska_dbi_test_table')
def test_20_fetchall(self):
self.rows = self.db.fetchall('select * from ska_dbi_test_table')
assert len(self.rows) == 3
assert self.rows[1]['id'] == 1
def test_25_insert_row_from_db(self):
rows = self.db.fetchall('select * from ska_dbi_test_table')
row = rows[0]
row['id'] += 10
row['tstart'] = 5
self.db.insert(row, 'ska_dbi_test_table')
def test_30_fetchone(self):
row = self.db.fetchone('select * from ska_dbi_test_table')
assert row['obsid'] == 4
def test_35_fetch(self):
for i, row in enumerate(self.db.fetch('select * from ska_dbi_test_table')):
assert np.allclose(row['tstart'], 2. + i)
def test_40_fetch_null(self):
for row in self.db.fetch('select * from ska_dbi_test_table where id=100000'):
assert False
def test_45_fetchone_null(self):
row = self.db.fetchone('select * from ska_dbi_test_table where id=100000')
assert row is None
def test_50_fetchall_null(self):
rows = self.db.fetchall('select * from ska_dbi_test_table where id=100000')
assert len(rows) == 0
def test_55_drop_table(self):
self.db.execute('drop table ska_dbi_test_table')
class TestSqliteWithNumpy(DBI_BaseTests):
db_config = dict(dbi='sqlite', numpy=True)
class TestSqliteWithoutNumpy(DBI_BaseTests):
db_config = dict(dbi='sqlite', numpy=False)
@pytest.mark.skipif('not HAS_SYBASE', reason='No SYBASE_OCS and/or sybpydb.so')
class TestSybaseWithNumpy(DBI_BaseTests):
db_config = dict(dbi='sybase', server='sybase', user='aca_test',
database='aca_tstdb', numpy=True)
@pytest.mark.skipif('not HAS_SYBASE', reason='No SYBASE_OCS and/or sybpydb.so')
class TestSybaseWithoutNumpy(DBI_BaseTests):
db_config = dict(dbi='sybase', server='sybase', user='aca_test',
database='aca_tstdb', numpy=False)
def test_context_manager():
with DBI(dbi='sqlite', server=':memory:') as db:
db.execute(TEST_TABLE_SQL)
for id_ in range(3):
data = dict(id=id_, tstart=2. + id_, tstop=3. + id_, obsid=4 + id_,
pcad_mode='npnt', aspect_mode='kalm', sim_mode='stop')
db.insert(data, 'ska_dbi_test_table')
rows = db.fetchall('select * from ska_dbi_test_table')
assert len(rows) == 3
assert rows[1]['id'] == 1
# check that access fails now
with pytest.raises(Exception):
rows = db.fetchall('select * from ska_dbi_test_table')
|
the-stack_0_18316 | import os
from PyQt5.QtWidgets import QTableWidget, QTabWidget, QTableWidgetItem, QHeaderView
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QCursor, QFont
from sciQt.widgets import DictMenu, DictDialog
from sciQt.widgets.timing import TTLTable, DACTable, DDSTable, ADCTable
from sciQt.tools import parse_units
import numpy as np
from PyQt5.QtWidgets import QLineEdit
class CustomHeader(QHeaderView):
def __init__(self, table):
QHeaderView.__init__(self, Qt.Horizontal)
self.table = table
self.customContextMenuRequested.connect(table.context_menu)
self.sectionDoubleClicked.connect(table.update_duration)
self.setDefaultSectionSize(75+5)
self.setFixedHeight(35)
self.setContextMenuPolicy(Qt.CustomContextMenu)
def clone(self):
''' Returns a new header sharing the same model. '''
new_header = CustomHeader(self.table)
new_header.setModel(self.model())
return new_header
class TimingTable(QTableWidget):
''' A master timing table which shares timestep information and basic
functionalities with child i/o tables (e.g. TTLTable). '''
def __init__(self, sequence, ttls=None, dacs=None, dds=None, adcs=None, time_unit='s'):
QTableWidget.__init__(self)
self.time_unit = time_unit
self.children = []
self.set_sequence(sequence)
self.horizontal_margin = 5
self.label_width = 35
self.setHorizontalHeader(CustomHeader(self))
self.hold_column = None
self.menu = None
self.tabs = QTabWidget()
if ttls is not None:
self.ttl_table = TTLTable(self, ttls)
self.tabs.addTab(self.ttl_table, 'TTL')
if adcs is not None:
self.adc_table = ADCTable(self, adcs)
self.tabs.addTab(self.adc_table, 'ADC')
if dacs is not None:
self.dac_table = DACTable(self, dacs)
self.tabs.addTab(self.dac_table, 'DAC')
if dds is not None:
self.dds_table = DDSTable(self, dds)
self.tabs.addTab(self.dds_table, 'DDS')
@staticmethod
def apply_stylesheet(table):
''' Applies a generic stylesheet to a target child table. '''
stylesheet = f"""
QTableWidget {{color:"#000000";
font-weight: light;
font-family: "Exo 2";
font-size: 14px;
gridline-color: transparent;
border-right-color: transparent;
border-left-color: transparent;
border-color: transparent;}}
"""
table.setStyleSheet(stylesheet)
def context_menu(self, event):
''' Handles right-click menu on header items. '''
col = self.columnAt(event.x())
actions = {'Insert right': lambda: self.insert_timestep(col+1),
'Insert left': lambda: self.insert_timestep(col),
'Delete': lambda: self.delete_timestep(col),
'Hold': lambda: self.hold(col)}
self.menu = DictMenu('header options', actions)
self.menu.actions['Hold'].setCheckable(True)
self.menu.actions['Hold'].setChecked(col==self.hold_column)
self.menu.popup(QCursor.pos())
def delete_timestep(self, col):
''' Deletes a timestep. '''
self.removeColumn(col)
def get_sequence(self):
''' Retrieves subsequences from all child tables and aggregates into
a master sequence. '''
sequence = []
for col in range(self.columnCount()):
duration = self.horizontalHeaderItem(col).text().split('\n')[1]
magnitude, duration = parse_units(duration, base_unit='s')
name = self.horizontalHeaderItem(col).text().split('\n')[0]
sequence.append({'duration': magnitude})
if name != '':
sequence[-1]['name'] = name
for child in self.children:
subsequence = child.get_sequence()
for i, step in enumerate(subsequence):
sequence[i].update(step)
return sequence
def hold(self, col):
''' Sets the designated column as the hold column. Passing the same
column as the hold column will reset the hold column. '''
if col != self.hold_column:
self.hold_column = col
else:
self.hold_column = None
for i in range(self.columnCount()):
if self.hold_column is None:
self.horizontalHeaderItem(i).setForeground(Qt.black)
self.horizontalHeaderItem(i).setFont(QFont())
elif i != self.hold_column:
self.horizontalHeaderItem(i).setForeground(Qt.gray)
self.horizontalHeaderItem(i).setFont(QFont())
else:
self.horizontalHeaderItem(i).setForeground(Qt.black)
font = QFont()
font.setBold(True)
self.horizontalHeaderItem(i).setFont(font)
# self.setColumnHidden(i, False)
def insert_timestep(self, col):
''' Inserts a timestep after the specified column. '''
self.insertColumn(col)
self.setHorizontalHeaderItem(col, QTableWidgetItem('\n0'))
def register(self, child):
''' Registers a child widget to inherit from this one. '''
self.children.append(child)
child.setHorizontalHeader(self.horizontalHeader().clone())
child.model = child.horizontalHeader().model()
self.model().columnsInserted.connect(lambda index, first, last: child.insert_timestep(last))
self.model().columnsRemoved.connect(lambda index, first, last: child.delete_timestep(last))
child.set_sequence(self.sequence)
self.apply_stylesheet(child)
def set_sequence(self, sequence):
''' Applies a json-formatted sequence to all child tables. '''
self.setColumnCount(len(sequence))
labels = []
for step in sequence:
header = ''
if 'name' in step:
header += step['name']
magnitude, step['duration'] = parse_units(step['duration'], base_unit='s')
header += '\n' + str(step['duration'])
labels.append(header)
self.setHorizontalHeaderLabels(labels)
for child in self.children:
child.set_sequence(sequence)
self.sequence = sequence
def sizeHint(self):
''' Returns a size scaled based on number of columns. '''
return QSize(self.columnCount()*(75+self.horizontal_margin)+60+self.label_width,
400)
def update_duration(self, index):
''' Popup for timestep duration changes. '''
old_duration = self.horizontalHeaderItem(index).text().split('\n')[1]
old_name = self.horizontalHeaderItem(index).text().split('\n')[0]
parameters = {'Duration': old_duration, 'Name': old_name}
updates, updated = DictDialog(parameters, units={'Duration': self.time_unit}).get_parameters()
# magnitude, updates['Duration'] = parse_units(updates['Duration'], base_unit=s)
if 'Name' not in updates:
updates['Name'] = ''
string = f"{updates['Name']}\n{updates['Duration']}"
if updated:
self.horizontalHeaderItem(index).setText(string)
|
the-stack_0_18317 |
import pandas as pd
from typing import List
from macrosynergy.management.simulate_quantamental_data import make_qdf
from macrosynergy.management.shape_dfs import reduce_df
def make_relative_value(df: pd.DataFrame, xcats: List[str], cids: List[str] = None,
start: str = None, end: str = None, blacklist: dict = None,
basket: List[str] = None, complete_cross: bool = False,
rel_meth: str = 'subtract', rel_xcats: List[str] = None,
postfix: str = 'R'):
"""
Returns dataframe with values relative to an average for basket of cross sections
through subtraction or division.
:param <pd.DataFrame> df: standardized data frame with the following necessary
columns: 'cid', 'xcat', 'real_date' and 'value'.
:param <List[str]> xcats: all extended categories for which relative values are to
be calculated.
:param <List[str]> cids: cross-sections for which relative values are calculated.
Default is every cross-section available for each respective category.
:param <str> start: earliest date in ISO format. Default is None and earliest date
for which the respective category is available is used.
:param <str> end: latest date in ISO format. Default is None and latest date for
which the respective category is available is used.
:param <dict> blacklist: cross-sections with date ranges that should be excluded from
the output.
:param <List[str]> basket: cross-sections to be used for the relative value
benchmark. The default is every cross-section which is available in the dataframe
over the respective time-period. If the basket is not complete, covering all
cross-sections, the basket is required to be a valid subset of the available
cross-sections.
:param <bool> complete_cross: Boolean parameter that outlines whether each category
is required to have the full set of cross-sections held by the basket parameter.
Default is False. If False, the mean, for the relative value, will use the subset
that is available for that category. For instance, if basket = ['AUD', 'CAD',
'GBP', 'NZD'] but available cids = ['GBP', 'NZD'], the basket will be implicitly
updated to basket = ['GBP', 'NZD'] for that respective category.
:param <str> rel_meth: method for calculating relative value. Default is 'subtract'.
Alternative is 'divide'.
:param <List[str]> rel_xcats: addendum to extended category name to indicate relative
value used.
:param <str> postfix: acronym to be appended to 'xcat' string to give the name for
relative value category. Only applies if rel_xcats is None. Default is 'R'
:return <pd.Dataframe>: standardized dataframe with the relative values, featuring
the categories: 'cid', 'xcats', 'real_date' and 'value'.
"""
    assert rel_meth in ['subtract', 'divide'], "rel_meth must be 'subtract' or 'divide', " \
                                               f"not {rel_meth}."
    assert isinstance(xcats, list) or isinstance(xcats, str), "List of categories " \
                                                              "expected, or a single " \
                                                              "category passed as a " \
                                                              "string object."
if isinstance(xcats, str):
xcats = [xcats]
if cids is None:
cids = list(df['cid'].unique())
if basket is not None:
miss = set(basket) - set(cids)
        assert len(miss) == 0, f"The basket elements {miss} are not among the " \
                               "specified or available cross-sections."
else:
basket = cids # Default basket is all available cross-sections.
col_names = ['cid', 'xcat', 'real_date', 'value']
# Host dataframe.
df_out = pd.DataFrame(columns=col_names)
# Reduce the dataframe to the defined categories.
# If the categories passed to the parameter "xcats" are not present in the dataframe,
# the below function will classify their absence in the console, and return the
# reduced dataframe on the categories which are available in the received dataframe.
dfx = reduce_df(df, xcats, cids, start, end, blacklist,
out_all=False)
available_xcats = dfx['xcat'].unique()
if len(cids) == len(basket) == 1:
return df_out
intersection_function = lambda l_1, l_2: sorted(list(set(l_1) & set(l_2)))
# Implicit assumption that both categories are defined over the same cross-sections.
for i, xcat in enumerate(available_xcats):
df_xcat = dfx[dfx['xcat'] == xcat]
available_cids = df_xcat['cid'].unique()
# If True, all cross-sections defined in the "basket" data structure are
# available for the respective category.
intersection = intersection_function(basket, available_cids)
clause = len(intersection)
missing_cids = list(set(basket) - set(intersection))
if clause != len(basket) and complete_cross:
print(f"The category, {xcat}, is missing {missing_cids} which are included in"
f" the basket {basket}. Therefore, the category will be excluded from "
"the returned dataframe.")
continue
elif clause != len(basket):
print(f"The category, {xcat}, is missing {missing_cids}. "
f"The new basket will be {intersection}.")
dfx_xcat = df_xcat[['cid', 'real_date', 'value']]
# Reduce the dataframe to the specified basket.
dfb = dfx_xcat[dfx_xcat['cid'].isin(basket)]
if len(basket) > 1:
# Mean of (available) cross sections at each point in time. If all
# cross-sections defined in the "basket" data structure are not available for
# a specific date, compute the mean over the available subset.
bm = dfb.groupby(by='real_date').mean()
else:
# Relative value is mapped against a single cross-section.
bm = dfb.set_index('real_date')['value']
dfw = dfx_xcat.pivot(index='real_date', columns='cid', values='value')
# Taking an average and computing the relative value is only justified if the
# number of cross-sections, for the respective date, exceeds one. Therefore, if
# any rows have only a single cross-section, remove the dates from the dataframe.
dfw = dfw[dfw.count(axis=1) > 1]
dfa = pd.merge(dfw, bm, how='left', left_index=True, right_index=True)
if rel_meth == 'subtract':
dfo = dfa[dfw.columns].sub(dfa.loc[:, 'value'], axis=0)
else:
dfo = dfa[dfw.columns].div(dfa.loc[:, 'value'], axis=0)
# Re-stack.
df_new = dfo.stack().reset_index().rename({'level_1': 'cid', 0: 'value'},
axis=1)
if rel_xcats is None:
df_new['xcat'] = xcat + postfix
else:
df_new['xcat'] = rel_xcats[i]
df_new = df_new.sort_values(['cid', 'real_date'])[col_names]
df_out = df_out.append(df_new)
return df_out.reset_index(drop=True)
if __name__ == "__main__":
# Simulate dataframe.
cids = ['AUD', 'CAD', 'GBP', 'NZD']
xcats = ['XR', 'CRY', 'GROWTH', 'INFL']
df_cids = pd.DataFrame(index=cids, columns=['earliest', 'latest', 'mean_add',
'sd_mult'])
df_cids.loc['AUD'] = ['2000-01-01', '2020-12-31', 0.1, 1]
df_cids.loc['CAD'] = ['2001-01-01', '2020-11-30', 0, 1]
df_cids.loc['GBP'] = ['2002-01-01', '2020-11-30', 0, 2]
df_cids.loc['NZD'] = ['2002-01-01', '2020-09-30', -0.1, 2]
df_xcats = pd.DataFrame(index=xcats, columns=['earliest', 'latest', 'mean_add',
'sd_mult', 'ar_coef', 'back_coef'])
df_xcats.loc['XR'] = ['2000-01-01', '2020-12-31', 0.1, 1, 0, 0.3]
df_xcats.loc['CRY'] = ['2000-01-01', '2020-10-30', 1, 2, 0.95, 1]
df_xcats.loc['GROWTH'] = ['2001-01-01', '2020-10-30', 1, 2, 0.9, 1]
df_xcats.loc['INFL'] = ['2001-01-01', '2020-10-30', 1, 2, 0.8, 0.5]
dfd = make_qdf(df_cids, df_xcats, back_ar=0.75)
# Simulate blacklist
black = {'AUD': ['2000-01-01', '2003-12-31'], 'GBP': ['2018-01-01', '2100-01-01']}
# Applications
dfd_1 = make_relative_value(dfd, xcats=['GROWTH', 'INFL'], cids=None,
blacklist=None, rel_meth='subtract', rel_xcats=None,
postfix='RV')
dfd_2 = make_relative_value(dfd, xcats=['XR', 'GROWTH', 'INFL'], cids=None,
blacklist=None, basket=['AUD', 'CAD', 'GBP'],
rel_meth='subtract', rel_xcats=['XRvB3', 'GROWTHvB3',
'INFLvB3'])
dfd_3 = make_relative_value(dfd, xcats=['GROWTH', 'INFL'], cids=None,
blacklist=None, basket=['AUD'],
rel_meth='subtract', rel_xcats=None, postfix='RV')
# Contrived test examples.
dfd_4 = make_relative_value(dfd, xcats=['GROWTH', 'INFL'], cids=['AUD', 'CAD'],
blacklist=None, basket=['AUD'],
rel_meth='subtract', rel_xcats=None, postfix='RV')
dfd_5 = make_relative_value(dfd, xcats=['GROWTH', 'INFL'], cids=['AUD'],
blacklist=None, basket=['AUD'],
rel_meth='subtract', rel_xcats=None, postfix='RV')
# Testing for complete-cross parameter.
xcats = ['XR', 'CRY']
start = '2000-01-01'
end = '2020-12-31'
dfx = reduce_df(df=dfd, xcats=xcats, cids=cids, start=start,
end=end, blacklist=None, out_all=False)
# On the reduced dataframe, remove a single cross-section from one of the
# categories.
filt1 = ~((dfx['cid'] == 'AUD') & (dfx['xcat'] == 'XR'))
dfdx = dfx[filt1]
# Pass in the filtered dataframe.
dfd_rl = make_relative_value(dfdx, xcats=xcats, cids=cids, start=start,
end=end, blacklist=None, basket=None,
complete_cross=True, rel_meth='subtract',
rel_xcats=None, postfix='RV') |
the-stack_0_18321 | import logging
import Adafruit_BME280
from blinker import signal
class BME280:
def __init__(self, address):
logging.info('Initialising BME280 sensor with address {}'.format(address))
self._sensor = Adafruit_BME280.BME280(address=address)
def get_temperature(self):
"""
Return measured temperature from the sensor.
:return:
"""
logging.debug('Measuring temperature')
temperature = self._sensor.read_temperature()
logging.info('Broadcasting temperature: {}'.format(temperature))
temperature_signal = signal('temperature')
temperature_signal.send(self, temperature=temperature)
def get_humidity(self):
"""
Return measured humidity from the sensor.
:return:
"""
logging.debug('Measuring humidity')
humidity = self._sensor.read_humidity()
logging.info('Broadcasting humidity: {}'.format(humidity))
humidity_signal = signal('humidity')
humidity_signal.send(self, humidity=humidity)
def get_pressure(self):
"""
Return measured pressure from the sensor.
:return:
"""
logging.debug('Measuring pressure')
pressure = self._convert_to_hectopascals(pressure=self._sensor.read_pressure())
logging.info('Broadcasting pressure: {}'.format(pressure))
pressure_signal = signal('pressure')
pressure_signal.send(self, pressure=pressure)
def _convert_to_hectopascals(self, pressure):
"""
Converts pressure to hectopascals
:param float pressure:
:return float:
"""
return pressure / 100.0
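

if __name__ == '__main__':
    # Hedged usage sketch: assumes a BME280 wired at I2C address 0x77 (the
    # Adafruit default); adjust for your hardware. Readings are broadcast via
    # blinker signals, so a consumer only needs to connect a receiver.
    logging.basicConfig(level=logging.INFO)

    def print_temperature(sender, temperature):
        print('Received temperature: {}'.format(temperature))

    signal('temperature').connect(print_temperature)
    sensor = BME280(address=0x77)
    sensor.get_temperature()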
|
the-stack_0_18322 | import random
import numpy as np
import pandas as pd
from pyziabm.orderbook3 import Orderbook
from pyziabm.trader2017_r3 import Provider, Provider5, Taker, MarketMaker, MarketMaker5, PennyJumper
class Runner(object):
def __init__(self, prime1=20, num_mms=1, mm_maxq=1, mm_quotes=12, mm_quote_range=60, mm_delta=0.025,
num_takers=50, taker_maxq=1, num_providers=38, provider_maxq=1, q_provide=0.5,
alpha=0.0375, mu=0.001, delta=0.025, lambda0=100, wn=0.001, c_lambda=1.0, run_steps=100000,
mpi=5, h5filename='test.h5', pj=False, alpha_pj=0):
self.alpha_pj = alpha_pj
self.q_provide = q_provide
self.lambda0 = lambda0
self.run_steps = run_steps+1
self.h5filename = h5filename
self.t_delta_t, self.taker_array = self.make_taker_array(taker_maxq, num_takers, mu)
self.t_delta_p, self.provider_array = self.make_provider_array(provider_maxq, num_providers, delta, mpi, alpha)
self.t_delta_m, self.marketmaker_array = self.make_marketmaker_array(mm_maxq, num_mms, mm_quotes, mm_quote_range, mm_delta, mpi)
self.pennyjumper = self.make_pennyjumper(mpi)
self.exchange = Orderbook()
self.q_take, self.lambda_t = self.make_q_take(wn, c_lambda)
self.trader_dict = self.make_traders(num_takers, num_providers, num_mms)
self.seed_orderbook()
self.make_setup(prime1)
if pj:
self.run_mcsPJ(prime1)
else:
self.run_mcs(prime1)
self.exchange.trade_book_to_h5(h5filename)
self.out_to_h5()
def seed_orderbook(self):
seed_provider = Provider('p999999', 1, 5, 0.05)
self.trader_dict.update({'p999999': seed_provider})
ba = random.choice(range(1000005, 1002001, 5))
bb = random.choice(range(997995, 999996, 5))
qask = {'order_id': 'p999999_a', 'timestamp': 0, 'type': 'add', 'quantity': 1, 'side': 'sell',
'price': ba, 'exid': 99999999}
qbid = {'order_id': 'p999999_b', 'timestamp': 0, 'type': 'add', 'quantity': 1, 'side': 'buy',
'price': bb, 'exid': 99999999}
seed_provider.local_book['p999999_a'] = qask
self.exchange.add_order_to_book(qask)
self.exchange.order_history.append(qask)
seed_provider.local_book['p999999_b'] = qbid
self.exchange.add_order_to_book(qbid)
self.exchange.order_history.append(qbid)
def make_taker_array(self, maxq, num_takers, mu):
default_arr = np.array([1, 5, 10, 25, 50])
actual_arr = default_arr[default_arr<=maxq]
taker_size = np.random.choice(actual_arr, num_takers)
t_delta_t = np.floor(np.random.exponential(1/mu, num_takers)+1)*taker_size
takers_list = ['t%i' % i for i in range(num_takers)]
takers = np.array([Taker(t,i) for t,i in zip(takers_list,taker_size)])
return t_delta_t, takers
def make_provider_array(self, maxq, num_providers, delta, mpi, alpha):
default_arr = np.array([1, 5, 10, 25, 50])
actual_arr = default_arr[default_arr<=maxq]
provider_size = np.random.choice(actual_arr, num_providers)
t_delta_p = np.floor(np.random.exponential(1/alpha, num_providers)+1)*provider_size
providers_list = ['p%i' % i for i in range(num_providers)]
if mpi==1:
providers = np.array([Provider(p,i,mpi,delta) for p,i in zip(providers_list,provider_size)])
else:
providers = np.array([Provider5(p,i,mpi,delta) for p,i in zip(providers_list,provider_size)])
return t_delta_p, providers
def make_marketmaker_array(self, maxq, num_mms, mm_quotes, mm_quote_range, mm_delta, mpi):
default_arr = np.array([1, 5, 10, 25, 50])
actual_arr = default_arr[default_arr<=maxq]
provider_size = np.random.choice(actual_arr, num_mms)
t_delta_m = maxq
marketmakers_list = ['m%i' % i for i in range(num_mms)]
if mpi==1:
marketmakers = np.array([MarketMaker(p,i,mpi,mm_delta,mm_quotes,mm_quote_range) for p,i in zip(marketmakers_list,provider_size)])
else:
marketmakers = np.array([MarketMaker5(p,i,mpi,mm_delta,mm_quotes,mm_quote_range) for p,i in zip(marketmakers_list,provider_size)])
return t_delta_m, marketmakers
def make_pennyjumper(self, mpi):
return PennyJumper('j0', 1, mpi)
def make_traders(self, num_takers, num_providers, num_mms):
takers_dict = dict(zip(['t%i' % i for i in range(num_takers)], list(self.taker_array)))
providers_dict = dict(zip(['p%i' % i for i in range(num_providers)], list(self.provider_array)))
takers_dict.update(providers_dict)
marketmakers_dict = dict(zip(['m%i' % i for i in range(num_mms)], list(self.marketmaker_array)))
takers_dict.update(marketmakers_dict)
if self.alpha_pj > 0:
takers_dict.update({'j0': self.pennyjumper})
return takers_dict
def make_providers(self, step):
providers = self.provider_array[np.remainder(step, self.t_delta_p)==0]
np.random.shuffle(providers)
return providers
def make_both(self, step):
providers_mask = np.remainder(step, self.t_delta_p)==0
takers_mask = np.remainder(step, self.t_delta_t)==0
marketmakers_mask = np.remainder(step, self.t_delta_m)==0
providers = np.vstack((self.provider_array, providers_mask)).T
takers = np.vstack((self.taker_array, takers_mask)).T
marketmakers = np.vstack((self.marketmaker_array, marketmakers_mask)).T
traders = np.vstack((providers, marketmakers, takers[takers_mask]))
np.random.shuffle(traders)
return traders
def make_q_take(self, s, c_lambda):
noise = np.random.rand(2,self.run_steps)
qt_take = np.empty_like(noise)
qt_take[:,0] = 0.5
for i in range(1,self.run_steps):
qt_take[:,i] = qt_take[:,i-1] + (noise[:,i-1]>qt_take[:,i-1])*s - (noise[:,i-1]<qt_take[:,i-1])*s
lambda_t = -self.lambda0*(1 + (np.abs(qt_take[1] - 0.5)/np.sqrt(np.mean(np.square(qt_take[0] - 0.5))))*c_lambda)
return qt_take[1], lambda_t
def qtake_to_h5(self):
temp_df = pd.DataFrame({'qt_take': self.q_take, 'lambda_t': self.lambda_t})
temp_df.to_hdf(self.h5filename, 'qtl', append=True, format='table', complevel=5, complib='blosc')
def mm_profitability_to_h5(self):
for m in self.marketmaker_array:
temp_df = pd.DataFrame(m.cash_flow_collector)
temp_df.to_hdf(self.h5filename, 'mmp', append=True, format='table', complevel=5, complib='blosc')
def out_to_h5(self):
self.qtake_to_h5()
self.mm_profitability_to_h5()
def make_setup(self, prime1):
top_of_book = self.exchange.report_top_of_book(0)
for current_time in range(1, prime1):
for p in self.make_providers(current_time):
p.process_signal(current_time, top_of_book, self.q_provide, -self.lambda0)
self.exchange.process_order(p.quote_collector[-1])
top_of_book = self.exchange.report_top_of_book(current_time)
def run_mcs(self, prime1):
top_of_book = self.exchange.report_top_of_book(prime1)
for current_time in range(prime1, self.run_steps):
for row in self.make_both(current_time):
if row[0].trader_type == 'Provider':
if row[1]:
row[0].process_signal(current_time, top_of_book, self.q_provide, self.lambda_t[current_time])
self.exchange.process_order(row[0].quote_collector[-1])
top_of_book = self.exchange.report_top_of_book(current_time)
row[0].bulk_cancel(current_time)
if row[0].cancel_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in row[0].cancel_collector:
self.exchange.process_order(c)
if self.exchange.confirm_modify_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
row[0].confirm_cancel_local(self.exchange.confirm_modify_collector[0])
top_of_book = self.exchange.report_top_of_book(current_time)
elif row[0].trader_type == 'MarketMaker':
if row[1]:
row[0].process_signal(current_time, top_of_book, self.q_provide)
for q in row[0].quote_collector:
self.exchange.process_order(q)
top_of_book = self.exchange.report_top_of_book(current_time)
row[0].bulk_cancel(current_time)
if row[0].cancel_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in row[0].cancel_collector:
self.exchange.process_order(c)
if self.exchange.confirm_modify_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
row[0].confirm_cancel_local(self.exchange.confirm_modify_collector[0])
top_of_book = self.exchange.report_top_of_book(current_time)
else:
row[0].process_signal(current_time, self.q_take[current_time])
self.exchange.process_order(row[0].quote_collector[-1])
if self.exchange.traded: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in self.exchange.confirm_trade_collector:
trader = self.trader_dict[c['trader']]
trader.confirm_trade_local(c)
top_of_book = self.exchange.report_top_of_book(current_time)
if not np.remainder(current_time, 2000):
self.exchange.order_history_to_h5(self.h5filename)
self.exchange.sip_to_h5(self.h5filename)
def run_mcsPJ(self, prime1):
top_of_book = self.exchange.report_top_of_book(prime1)
for current_time in range(prime1, self.run_steps):
for row in self.make_both(current_time):
if row[0].trader_type == 'Provider':
if row[1]:
row[0].process_signal(current_time, top_of_book, self.q_provide, self.lambda_t[current_time])
self.exchange.process_order(row[0].quote_collector[-1])
top_of_book = self.exchange.report_top_of_book(current_time)
row[0].bulk_cancel(current_time)
if row[0].cancel_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in row[0].cancel_collector:
self.exchange.process_order(c)
if self.exchange.confirm_modify_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
row[0].confirm_cancel_local(self.exchange.confirm_modify_collector[0])
top_of_book = self.exchange.report_top_of_book(current_time)
elif row[0].trader_type == 'MarketMaker':
if row[1]:
row[0].process_signal(current_time, top_of_book, self.q_provide)
for q in row[0].quote_collector:
self.exchange.process_order(q)
top_of_book = self.exchange.report_top_of_book(current_time)
# row[0].bulk_cancel(self.delta*2, current_time, self.q_take[current_time])
row[0].bulk_cancel(current_time)
if row[0].cancel_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in row[0].cancel_collector:
self.exchange.process_order(c)
if self.exchange.confirm_modify_collector: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
row[0].confirm_cancel_local(self.exchange.confirm_modify_collector[0])
top_of_book = self.exchange.report_top_of_book(current_time)
else:
row[0].process_signal(current_time, self.q_take[current_time])
self.exchange.process_order(row[0].quote_collector[-1])
if self.exchange.traded: # <---- Check permission versus forgiveness here and elsewhere - move to methods?
for c in self.exchange.confirm_trade_collector:
trader = self.trader_dict[c['trader']]
trader.confirm_trade_local(c)
top_of_book = self.exchange.report_top_of_book(current_time)
if random.uniform(0,1) < self.alpha_pj:
self.pennyjumper.process_signal(current_time, top_of_book, self.q_take[current_time])
if self.pennyjumper.cancel_collector:
for c in self.pennyjumper.cancel_collector:
self.exchange.process_order(c)
if self.pennyjumper.quote_collector:
for q in self.pennyjumper.quote_collector:
self.exchange.process_order(q)
top_of_book = self.exchange.report_top_of_book(current_time)
if not np.remainder(current_time, 2000):
self.exchange.order_history_to_h5(self.h5filename)
self.exchange.sip_to_h5(self.h5filename)
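

if __name__ == '__main__':
    # Hedged usage sketch: Runner launches the full simulation from __init__;
    # the values below are small illustrative settings, not calibrated inputs.
    Runner(run_steps=1000, h5filename='demo.h5')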
|
the-stack_0_18323 | import numpy as np
from torch.utils.tensorboard import SummaryWriter
class TensorboardVisualizer:
@staticmethod
def modify_commandline_options(parser, is_train=True):
if is_train:
            parser.add_argument('--tbvis_iteration_update_rate', type=int, default=1000,
                                help='Number of iteration steps between writes of statistics to tensorboard.')
            parser.add_argument('--tbvis_disable_report_weights', action='store_true',
                                help='Disable reporting of network weight changes.')
            parser.add_argument('--tbvis_disable_report_offsets', action='store_true',
                                help='Disable reporting of mean deformation offsets in the x and y directions.')
return parser
def __init__(self, mirnet_model, networks_names, losses_names, opt):
self.writer = None
self.writer_log_dir = '{}/{}/{}_tensorboard_logs'.format(opt.checkpoints_dir, opt.name, opt.name)
self.enabled = False
self.model = mirnet_model
self.networks_names = networks_names
self.networks = {}
self.iteration_update_rate = opt.tbvis_iteration_update_rate
self.iteration_cnt = 0
self.save_count = 0
self.offset_x = 0.0
self.offset_y = 0.0
self.offset_cnt = 1
self.losses_names = losses_names
self.report_weights = not opt.tbvis_disable_report_weights
self.report_offsets = not opt.tbvis_disable_report_offsets
def save_current_weights(self):
for net_name, net in self.networks.items():
for n, p in net.named_parameters():
if p.requires_grad:
suffix = 'Bias' if ("bias" in n) else 'Weight'
name = '{}/data/{}/{}'.format(net_name, suffix, n)
self.writer.add_histogram(name, p.clone().cpu().data.numpy(), self.save_count)
def save_histogram(self, name, tensor):
if name not in self.image_step:
self.image_step[name] = 0
self.image_count[name] = -1
self.image_count[name] = (self.image_count[name] + 1) % self.grads_update_rate
if self.image_count[name] != 0:
return
step = self.image_step[name] + 1
self.image_step[name] = step
tensor = tensor.detach().cpu().numpy()
self.writer.add_histogram(name, tensor, step)
def save_current_losses(self):
for lname in self.losses_names:
loss_val = getattr(self.model, 'loss_{}'.format(lname))
self.writer.add_scalar('loss/{}'.format(lname), loss_val, self.save_count)
def save_offsets(self):
mean_x = self.offset_x / self.offset_cnt
self.writer.add_scalar('offset/mean_x', mean_x, self.save_count)
mean_y = self.offset_y / self.offset_cnt
self.writer.add_scalar('offset/mean_y', mean_y, self.save_count)
self.offset_x = self.offset_y = 0.0
self.offset_cnt = 0.0
def iteration_step(self):
if not self.enabled:
return
if self.report_offsets:
offset = self.model.deformation_field_A_to_B.data.cpu().numpy()
self.offset_x += np.mean(offset[:, 0, ...])
self.offset_y += np.mean(offset[:, 1, ...])
self.offset_cnt += 1
if self.iteration_update_rate <= 0:
return
if self.iteration_cnt == 0:
self.save_current_losses()
if self.report_weights:
self.save_current_weights()
if self.report_offsets:
self.save_offsets()
self.save_count += 1
self.iteration_cnt = (self.iteration_cnt + 1) % self.iteration_update_rate
def epoch_step(self):
# Don't report statistics if the update is in iteration resolution.
if not self.enabled or self.iteration_update_rate > 0:
return
self.save_current_losses()
if self.report_weights:
self.save_current_weights()
if self.report_offsets:
self.save_offsets()
self.save_count += 1
def end(self):
self.writer.close()
def enable(self):
self.enabled = True
self.writer = SummaryWriter(self.writer_log_dir)
for net_name in self.networks_names:
self.networks[net_name] = getattr(self.model, net_name)
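

def _example_options_parser():
    # Hedged sketch (illustrative, not used by the visualizer itself): the
    # static modify_commandline_options hook is meant to extend an existing
    # argparse parser, e.g. inside an options class.
    import argparse
    parser = argparse.ArgumentParser()
    return TensorboardVisualizer.modify_commandline_options(parser, is_train=True)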
|
the-stack_0_18327 | #!/usr/bin/env python
# encoding: utf-8
# @author: Zhipeng Ye
# @contact: [email protected]
# @file: main.py
# @time: 2019-12-21 18:41
# @desc:
import re
import os
import math
class LanguageModelContent:
def __init__(self, possibility, words, punishment=''):
self.possibility = possibility
self.words = words
self.punishment = punishment
def __str__(self):
return self.possibility + '\t' + self.words + '\t' + self.punishment
def write_language_1model(unary_model, binary_class_count, unary_count):
with open('output.txt', 'a') as file:
file.write('ngram 2=' + str(binary_class_count) + '\n')
file.write('\n')
file.write('\\1-grams:\n')
# count 1 length word and n length word, n>1
unary_model_keys = unary_model.keys()
language_1models = []
for key in unary_model_keys:
count = unary_model.get(key)
conditional_possibility = math.log10(count / unary_count)
conditional_possibility = round(conditional_possibility, 6)
model1 = LanguageModelContent(str(conditional_possibility), key, str(-99))
language_1models.append(model1)
with open('output.txt', 'a') as file:
for model in language_1models:
file.write(str(model) + '\n')
file.write('\n')
file.write('\\2-grams:\n')
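# The 2-gram section appended below (in the __main__ block) uses the
# maximum-likelihood estimate log10 P(w2 | w1) = log10(c(w1 w2) / c(w1)),
# with c(w1) taken from the unigram counts collected first.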
if __name__ == "__main__":
with open('output.txt', 'a') as file:
file.write('\data\\\n')
unary_model = {}
unary_class_count = 0
unary_count = 0
with open('/Users/geekye/Documents/Dataset/LM/UniBiGram/ngrams-00000-of-00394') as file:
for line in file:
if re.match('^[\u4e00-\u9fa5]{1,}[\s\t]{1,}\d{1,}', line) and line != '' and line != '\n':
content = line.split('\t')
key = content[0]
value = int(content[1])
unary_count = unary_count + value
unary_class_count = unary_class_count + 1
unary_model[key] = value
with open('output.txt', 'a') as file:
file.write('ngram 1=' + str(unary_class_count) + '\n')
binary_class_count = 0
files = sorted(os.listdir('/Users/geekye/Documents/Dataset/LM/UniBiGram'))
files_valid = [file for file in files if file != 'ngrams-00000-of-00394']
language_1model_switch = True
for binary_file in files_valid:
language_models = []
with open('/Users/geekye/Documents/Dataset/LM/UniBiGram/' + binary_file) as file:
for line in file:
if re.match('^[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}\d{1,}',line) and line != '' and line != '\n':
content = re.split('[\s\t]+', line)
under_word = content[0]
after_word = content[1]
if re.match('^[\u4e00-\u9fa5]{1,}', after_word):
words = under_word + ' ' + after_word
union_count = int(content[2])
binary_class_count = binary_class_count + 1
count = unary_model.get(under_word)
if count is not None:
conditional_possibility = union_count / count
conditional_possibility = math.log10(conditional_possibility)
conditional_possibility = round(conditional_possibility, 6)
language_model = LanguageModelContent(str(conditional_possibility), words)
language_models.append(language_model)
# this process can run in first loop
if language_1model_switch:
write_language_1model(unary_model, binary_class_count, unary_count)
language_1model_switch = False
with open('output.txt', 'a') as file:
for model in language_models:
file.write(str(model) + '\n')
with open('output.txt', 'a') as file:
file.write('\\end\\')
file.write('ngram 2=' + str(binary_class_count) + '\n')
|
the-stack_0_18328 | # Implementation of the Hidden Markov Model for discrete observations
# Author: Gerardo Duran-Martin (@gerdm)
import numpy as np
from numpy.random import seed, choice
class HMMDiscrete:
def __init__(self, A, px, pi):
"""
This class simulates a Hidden Markov Model with
categorical distribution
Parameters
----------
A: array(state_size, state_size)
State transition matrix
px: array(state_size, observation_size)
Matrix of conditional categorical probabilities
of obsering the ith category
pi: array(state_size)
Array of initial-state probabilities
"""
self.A = A
self.px = px
self.pi = pi
self.state_size, self.observation_size = px.shape
def sample(self, n_samples, random_state=None):
seed(random_state)
latent_states = np.arange(self.state_size)
obs_states = np.arange(self.observation_size)
z_hist = np.zeros(n_samples, dtype=int)
x_hist = np.zeros(n_samples, dtype=int)
zt = choice(latent_states, p=self.pi)
xt = choice(obs_states, p=self.px[zt])
z_hist[0] = zt
x_hist[0] = xt
for t in range(1, n_samples):
zt = choice(latent_states, p=self.A[zt])
xt = choice(obs_states, p=self.px[zt])
z_hist[t] = zt
x_hist[t] = xt
return z_hist, x_hist
def forwards(self, x_hist):
"""
Calculates a belief state
Parameters
----------
x_hist: array(n_samples)
History of observed states
Returns
-------
* array(n_samples, n_hidden) :
All alpha values found for each sample
* float
The loglikelihood giving log(p(x|model))
"""
n_samples = len(x_hist)
alpha_hist = np.zeros((n_samples, self.state_size))
c_elements = np.zeros(n_samples) # normalization constants
alpha_n = self.pi * self.px[:, x_hist[0]]
cn = alpha_n.sum()
alpha_n = alpha_n / cn
alpha_hist[0] = alpha_n
c_elements[0] = cn
for t in range(1, n_samples):
alpha_n = self.px[:, x_hist[t]] * (alpha_n[:, None] * self.A).sum(axis=0)
cn = alpha_n.sum()
alpha_n = alpha_n / cn
alpha_hist[t] = alpha_n
c_elements[t] = cn
return alpha_hist, np.sum(np.log(c_elements))
def backwards_filtering(self, x_hist):
n_samples = len(x_hist)
beta_next = np.ones(self.state_size)
beta_hist = np.zeros((n_samples, self.state_size))
beta_hist[-1] = beta_next
for t in range(2, n_samples + 1):
beta_next = (beta_next * self.px[:, x_hist[-t + 1]] * self.A).sum(axis=1)
beta_next = beta_next / beta_next.sum()
beta_hist[-t] = beta_next
return beta_hist
def forwards_backwards(self, x_hist, alpha_hist=None, beta_hist=None):
if alpha_hist is None:
alpha_hist, _ = self.forwards(x_hist)
if beta_hist is None:
beta_hist = self.backwards_filtering(x_hist)
gamma = alpha_hist * beta_hist
return gamma / gamma.sum(axis=1).reshape((-1, 1))
def map_state(self, x_hist):
"""
Compute the most probable sequence of states
Parameters
----------
x_hist: array(n_samples)
History of observed states
Returns
-------
* array(n_samples)
Sequence of most MAP probable sequence of states
"""
n_samples = len(x_hist)
logp_hist = np.zeros((n_samples, self.state_size))
wn = np.log(self.A) + np.log(self.pi) + np.log(self.px[:, x_hist[0]])
wn = wn.max(axis=1)
logp_hist[0] = wn
for t in range(1, n_samples):
wn = np.log(self.A) + np.log(self.px[:, x_hist[t]]) + wn
wn = wn.max(axis=1)
logp_hist[t] = wn
return logp_hist.argmax(axis=1) |
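

if __name__ == "__main__":
    # Hedged usage sketch: a two-state, three-symbol HMM with made-up
    # parameters, purely to illustrate the API above.
    A = np.array([[0.9, 0.1],
                  [0.2, 0.8]])
    px = np.array([[0.7, 0.2, 0.1],
                   [0.1, 0.3, 0.6]])
    pi = np.array([0.5, 0.5])
    hmm = HMMDiscrete(A, px, pi)
    z_hist, x_hist = hmm.sample(100, random_state=0)
    alpha_hist, loglik = hmm.forwards(x_hist)
    gamma = hmm.forwards_backwards(x_hist, alpha_hist=alpha_hist)
    print("loglik:", loglik, "smoothed shape:", gamma.shape)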
the-stack_0_18330 | # Binary search function
def binarySearch(xlist, key):
a = 0
b = len(xlist)
while a < b:
c = (a + b)//2
if xlist[c] > key:
b = c
elif xlist[c] < key:
a = c + 1
else:
return c
return -1
# input a list of elements
xlist = input('Enter the sorted list of numbers: ')
#split a element
xlist = xlist.split()
xlist = [int(x) for x in xlist]
# search for in list
key = int(input('The number to search for: '))
# call binary search function
index = binarySearch(xlist, key)
if index < 0:
print('{} was not found.'.format(key))
else:
print('{} was found at index {}.'.format(key, index))
|
the-stack_0_18331 | #!/usr/bin/env python3.7
import os
import json
import copy
import datetime
import psutil
from smbus2 import SMBus
from cereal import log
from common.android import ANDROID, get_network_type, get_network_strength
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import sec_since_boot, DT_TRML
from common.numpy_fast import clip, interp
from common.filter_simple import FirstOrderFilter
from selfdrive.version import terms_version, training_version
from selfdrive.swaglog import cloudlog
import cereal.messaging as messaging
from selfdrive.loggerd.config import get_available_percent
from selfdrive.pandad import get_expected_signature
from selfdrive.thermald.power_monitoring import PowerMonitoring, get_battery_capacity, get_battery_status, get_battery_current, get_battery_voltage, get_usb_present
FW_SIGNATURE = get_expected_signature()
ThermalStatus = log.ThermalData.ThermalStatus
NetworkType = log.ThermalData.NetworkType
NetworkStrength = log.ThermalData.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
DAYS_NO_CONNECTIVITY_MAX = 0 # do not allow to engage after a week without internet
DAYS_NO_CONNECTIVITY_PROMPT = 0 # send an offroad prompt after 4 days with no internet
LEON = False
last_eon_fan_val = None
with open(BASEDIR + "/selfdrive/controls/lib/alerts_offroad.json") as json_file:
OFFROAD_ALERTS = json.load(json_file)
def read_tz(x, clip=True):
if not ANDROID:
# we don't monitor thermal on PC
return 0
try:
with open("/sys/devices/virtual/thermal/thermal_zone%d/temp" % x) as f:
ret = int(f.read())
if clip:
ret = max(0, ret)
except FileNotFoundError:
return 0
return ret
def read_thermal():
dat = messaging.new_message('thermal')
dat.thermal.cpu0 = read_tz(5)
dat.thermal.cpu1 = read_tz(7)
dat.thermal.cpu2 = read_tz(10)
dat.thermal.cpu3 = read_tz(12)
dat.thermal.mem = read_tz(2)
dat.thermal.gpu = read_tz(16)
dat.thermal.bat = read_tz(29)
dat.thermal.pa0 = read_tz(25)
return dat
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
LEON = True
bus.close()
def set_eon_fan(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
#bus.write_i2c_block_data(0x67, 0x45, [1<<2])
else:
#bus.write_i2c_block_data(0x67, 0x45, [0])
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val-1)<<6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
# max fan speed only allowed if battery is hot
_BAT_TEMP_THERSHOLD = 45.
def handle_fan_eon(max_cpu_temp, bat_temp, fan_speed, ignition):
new_speed_h = next(speed for speed, temp_h in zip(_FAN_SPEEDS, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_FAN_SPEEDS, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
if bat_temp < _BAT_TEMP_THERSHOLD:
# no max fan speed unless battery is hot
fan_speed = min(fan_speed, _FAN_SPEEDS[-2])
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(max_cpu_temp, bat_temp, fan_speed, ignition):
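  # linear map from CPU temp to fan speed, e.g. 60C -> int(interp(60., [40., 80.], [0, 80])) == 40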
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(30, new_speed)
return new_speed
def thermald_thread():
# prevent LEECO from undervoltage
BATT_PERC_OFF = 10 if LEON else 3
health_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected health frequency
# now loop
thermal_sock = messaging.pub_sock('thermal')
health_sock = messaging.sub_sock('health', timeout=health_timeout)
location_sock = messaging.sub_sock('gpsLocation')
ignition = False
fan_speed = 0
count = 0
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
thermal_status_prev = ThermalStatus.green
usb_power = True
usb_power_prev = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
health_prev = None
fw_version_match_prev = True
current_connectivity_alert = None
time_valid_prev = True
should_start_prev = False
is_uno = (read_tz(29, clip=False) < -1000)
if is_uno or not ANDROID:
handle_fan = handle_fan_uno
else:
setup_eon_fan()
handle_fan = handle_fan_eon
params = Params()
pm = PowerMonitoring()
while 1:
health = messaging.recv_sock(health_sock, wait=True)
location = messaging.recv_sock(location_sock)
location = location.gpsLocation if location else None
msg = read_thermal()
# clear car params when panda gets disconnected
if health is None and health_prev is not None:
params.panda_disconnect()
health_prev = health
if health is not None:
usb_power = health.health.usbPowerMode != log.HealthData.UsbPowerMode.client
# get_network_type is an expensive call. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = get_network_type()
network_strength = get_network_strength(network_type)
except Exception:
cloudlog.exception("Error getting network status")
msg.thermal.freeSpace = get_available_percent(default=100.0) / 100.0
msg.thermal.memUsedPercent = int(round(psutil.virtual_memory().percent))
msg.thermal.cpuPerc = int(round(psutil.cpu_percent()))
msg.thermal.networkType = network_type
msg.thermal.networkStrength = network_strength
msg.thermal.batteryPercent = get_battery_capacity()
msg.thermal.batteryStatus = get_battery_status()
msg.thermal.batteryCurrent = get_battery_current()
msg.thermal.batteryVoltage = get_battery_voltage()
msg.thermal.usbOnline = get_usb_present()
# Fake battery levels on uno for frame
if is_uno:
msg.thermal.batteryPercent = 100
msg.thermal.batteryStatus = "Charging"
current_filter.update(msg.thermal.batteryCurrent / 1e6)
# TODO: add car battery voltage check
max_cpu_temp = max(msg.thermal.cpu0, msg.thermal.cpu1,
msg.thermal.cpu2, msg.thermal.cpu3) / 10.0
max_comp_temp = max(max_cpu_temp, msg.thermal.mem / 10., msg.thermal.gpu / 10.)
bat_temp = msg.thermal.bat / 1000.
fan_speed = handle_fan(max_cpu_temp, bat_temp, fan_speed, ignition)
msg.thermal.fanSpeed = fan_speed
    # thermal logic with hysteresis
if max_cpu_temp > 107. or bat_temp >= 63.:
# onroad not allowed
thermal_status = ThermalStatus.danger
elif max_comp_temp > 92.5 or bat_temp > 60.: # CPU throttling starts around ~90C
# hysteresis between onroad not allowed and engage not allowed
thermal_status = clip(thermal_status, ThermalStatus.red, ThermalStatus.danger)
elif max_cpu_temp > 87.5:
# hysteresis between engage not allowed and uploader not allowed
thermal_status = clip(thermal_status, ThermalStatus.yellow, ThermalStatus.red)
elif max_cpu_temp > 80.0:
# uploader not allowed
thermal_status = ThermalStatus.yellow
elif max_cpu_temp > 75.0:
# hysteresis between uploader not allowed and all good
thermal_status = clip(thermal_status, ThermalStatus.green, ThermalStatus.yellow)
else:
# all good
thermal_status = ThermalStatus.green
# **** starting logic ****
# Check for last update time and display alerts if needed
now = datetime.datetime.now()
# show invalid date/time alert
time_valid = now.year >= 2019
if time_valid and not time_valid_prev:
params.delete("Offroad_InvalidTime")
if not time_valid and time_valid_prev:
params.put("Offroad_InvalidTime", json.dumps(OFFROAD_ALERTS["Offroad_InvalidTime"]))
time_valid_prev = time_valid
# Show update prompt
try:
last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
except (TypeError, ValueError):
last_update = now
dt = now - last_update
update_failed_count = params.get("UpdateFailedCount")
update_failed_count = 0 if update_failed_count is None else int(update_failed_count)
if dt.days > DAYS_NO_CONNECTIVITY_MAX > 1 and update_failed_count > 1:
if current_connectivity_alert != "expired":
current_connectivity_alert = "expired"
params.delete("Offroad_ConnectivityNeededPrompt")
params.put("Offroad_ConnectivityNeeded", json.dumps(OFFROAD_ALERTS["Offroad_ConnectivityNeeded"]))
elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT > 1:
remaining_time = str(max(DAYS_NO_CONNECTIVITY_MAX - dt.days, 0))
if current_connectivity_alert != "prompt" + remaining_time:
current_connectivity_alert = "prompt" + remaining_time
alert_connectivity_prompt = copy.copy(OFFROAD_ALERTS["Offroad_ConnectivityNeededPrompt"])
alert_connectivity_prompt["text"] += remaining_time + " days."
params.delete("Offroad_ConnectivityNeeded")
params.put("Offroad_ConnectivityNeededPrompt", json.dumps(alert_connectivity_prompt))
elif current_connectivity_alert is not None:
current_connectivity_alert = None
params.delete("Offroad_ConnectivityNeeded")
params.delete("Offroad_ConnectivityNeededPrompt")
# start constellation of processes when the car starts
ignition = health is not None and (health.health.ignitionLine or health.health.ignitionCan)
do_uninstall = params.get("DoUninstall") == b"1"
accepted_terms = params.get("HasAcceptedTerms") == terms_version
completed_training = params.get("CompletedTrainingVersion") == training_version
panda_signature = params.get("PandaFirmware")
    fw_version_match = (panda_signature is None) or (panda_signature == FW_SIGNATURE)  # don't show alert if no panda is connected (None)
should_start = ignition
# with 2% left, we killall, otherwise the phone will take a long time to boot
should_start = should_start and msg.thermal.freeSpace > 0.02
# confirm we have completed training and aren't uninstalling
should_start = should_start and accepted_terms and completed_training and (not do_uninstall)
# check for firmware mismatch
should_start = should_start and fw_version_match
# check if system time is valid
should_start = should_start and time_valid
# don't start while taking snapshot
if not should_start_prev:
is_taking_snapshot = params.get("IsTakingSnapshot") == b"1"
should_start = should_start and (not is_taking_snapshot)
if fw_version_match and not fw_version_match_prev:
params.delete("Offroad_PandaFirmwareMismatch")
if not fw_version_match and fw_version_match_prev:
params.put("Offroad_PandaFirmwareMismatch", json.dumps(OFFROAD_ALERTS["Offroad_PandaFirmwareMismatch"]))
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
if thermal_status >= ThermalStatus.danger:
should_start = False
if thermal_status_prev < ThermalStatus.danger:
params.put("Offroad_TemperatureTooHigh", json.dumps(OFFROAD_ALERTS["Offroad_TemperatureTooHigh"]))
else:
if thermal_status_prev >= ThermalStatus.danger:
params.delete("Offroad_TemperatureTooHigh")
if should_start:
if not should_start_prev:
params.delete("IsOffroad")
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
os.system('echo performance > /sys/class/devfreq/soc:qcom,cpubw/governor')
else:
if should_start_prev or (count == 0):
params.put("IsOffroad", "1")
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
os.system('echo powersave > /sys/class/devfreq/soc:qcom,cpubw/governor')
    # shutdown if the battery drops below BATT_PERC_OFF while discharging, we've
    # been off for more than a minute, and we had started at least once this boot
if msg.thermal.batteryPercent < BATT_PERC_OFF and msg.thermal.batteryStatus == "Discharging" and \
started_seen and (sec_since_boot() - off_ts) > 60:
os.system('LD_LIBRARY_PATH="" svc power shutdown')
# Offroad power monitoring
pm.calculate(health)
msg.thermal.offroadPowerUsage = pm.get_power_used()
msg.thermal.chargingError = current_filter.x > 0. and msg.thermal.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.thermal.started = started_ts is not None
msg.thermal.startedTs = int(1e9*(started_ts or 0))
msg.thermal.thermalStatus = thermal_status
thermal_sock.send(msg.to_bytes())
if usb_power_prev and not usb_power:
params.put("Offroad_ChargeDisabled", json.dumps(OFFROAD_ALERTS["Offroad_ChargeDisabled"]))
elif usb_power and not usb_power_prev:
params.delete("Offroad_ChargeDisabled")
thermal_status_prev = thermal_status
usb_power_prev = usb_power
fw_version_match_prev = fw_version_match
should_start_prev = should_start
# report to server once per minute
if (count % int(60. / DT_TRML)) == 0:
cloudlog.event("STATUS_PACKET",
count=count,
health=(health.to_dict() if health else None),
location=(location.to_dict() if location else None),
thermal=msg.to_dict())
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
|
the-stack_0_18333 | import os
import errno
import socket
import struct
from select import select
from .utils import DictWrapper
from . import netlink, connector
PROC_CN_MCAST_LISTEN = 0x1
PROC_CN_MCAST_IGNORE = 0x2
PROC_EVENT_NONE = 0x00000000
PROC_EVENT_FORK = 0x00000001
PROC_EVENT_EXEC = 0x00000002
PROC_EVENT_UID = 0x00000004
PROC_EVENT_GID = 0x00000040
PROC_EVENT_SID = 0x00000080
PROC_EVENT_PTRACE = 0x00000100
PROC_EVENT_COMM = 0x00000200
PROC_EVENT_EXIT = 0x80000000
process_events = {"PROC_EVENT_NONE": PROC_EVENT_NONE,
"PROC_EVENT_FORK": PROC_EVENT_FORK,
"PROC_EVENT_EXEC": PROC_EVENT_EXEC,
"PROC_EVENT_UID": PROC_EVENT_UID,
"PROC_EVENT_GID": PROC_EVENT_GID,
"PROC_EVENT_SID": PROC_EVENT_SID,
"PROC_EVENT_PTRACE": PROC_EVENT_PTRACE,
"PROC_EVENT_COMM": PROC_EVENT_COMM,
"PROC_EVENT_EXIT": PROC_EVENT_EXIT}
process_events_rev = dict(zip(process_events.values(),
process_events.keys()))
base_proc_event = struct.Struct("=2IL")
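# "=2IL" covers the fixed header shared by every proc_event: what (u32),
# cpu (u32) and timestamp_ns (unsigned long); the event-specific payload that
# follows is described per event type in event_struct_map below.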
event_struct_map = {PROC_EVENT_NONE: struct.Struct("=I"),
PROC_EVENT_FORK: struct.Struct("=4I"),
PROC_EVENT_EXEC: struct.Struct("=2I"),
PROC_EVENT_UID: struct.Struct("=4I"),
PROC_EVENT_GID: struct.Struct("=4I"),
PROC_EVENT_SID: struct.Struct("=2I"),
PROC_EVENT_PTRACE: struct.Struct("=4I"),
PROC_EVENT_COMM: struct.Struct("=2I16s"),
PROC_EVENT_EXIT: struct.Struct("=4I")}
process_list = []
def pec_bind(s):
"""
Bind a socket to the Process Event Connector.
This will pass on any socket.error exception raised. The most
common one will be EPERM since you need root privileges to
bind to the connector.
"""
s.bind((os.getpid(), connector.CN_IDX_PROC))
def pec_control(s, listen=False):
"""
Notify PEC if we want event notifications on this socket or not.
"""
pec_ctrl_data = struct.Struct("=I")
if listen:
action = PROC_CN_MCAST_LISTEN
else:
action = PROC_CN_MCAST_IGNORE
nl_msg = netlink.netlink_pack(
netlink.NLMSG_DONE, 0, connector.pack_msg(
connector.CN_IDX_PROC, connector.CN_VAL_PROC, 0,
pec_ctrl_data.pack(action)))
s.send(nl_msg)
def pec_unpack(data):
"""
Peel off the wrapping layers from the data. This will return
a DictWrapper object.
"""
nl_hdr = netlink.unpack_hdr(data)
if nl_hdr.type != netlink.NLMSG_DONE:
# Ignore all other types of messages
return
# Slice off header data and trailing data (if any)
data = data[netlink.nlmsghdr.size:nl_hdr.len]
#msg = connector.unpack_msg(data)
# .. and away goes the connector_message, leaving just the payload
data = data[connector.cn_msg.size:]
event = list(base_proc_event.unpack(data[:base_proc_event.size]))
ev_data_struct = event_struct_map.get(event[0])
event_data = ev_data_struct.unpack(
data[base_proc_event.size:base_proc_event.size+ev_data_struct.size])
fields = ["what", "cpu", "timestamp_ns"]
if event[0] == PROC_EVENT_NONE:
fields.append("err")
event[1] = -1
elif event[0] == PROC_EVENT_FORK:
fields += ["parent_pid", "parent_tgid", "child_pid", "child_tgid"]
elif event[0] == PROC_EVENT_EXEC:
fields += ["process_pid", "process_tgid"]
elif event[0] == PROC_EVENT_UID:
fields += ["process_pid", "process_tgid", "ruid", "rgid"]
elif event[0] == PROC_EVENT_GID:
fields += ["process_pid", "process_tgid", "euid", "egid"]
elif event[0] == PROC_EVENT_SID:
fields += ["process_pid", "process_tgid"]
elif event[0] == PROC_EVENT_PTRACE:
fields += ["process_pid", "process_tgid", "tracer_pid", "tracer_tgid"]
elif event[0] == PROC_EVENT_COMM:
fields += ["process_pid", "process_tgid", "comm"]
elif event[0] == PROC_EVENT_EXIT:
fields += ["process_pid", "process_tgid", "exit_code", "exit_signal"]
return DictWrapper(zip(fields, tuple(event) + event_data))
def register_process(pid=None, process_name=None, events=(), action=None):
"""
Register a callback for processes of a specific name or
by pid. pec_loop() will call this callback for any processes
matching.
If no events is specified, all events related to
that pid will call the callback. The action can be any callable.
One argument will be passed to the callable, the PEC message,
as returned by pec_unpack().
"""
for x in events:
if x not in process_events:
raise Exception("No such process event: 0x%08x" % (int(x),))
process_list.append({'pid': pid,
'process_name': process_name,
                         'events': events,
                         'action': action})
def pec_loop(plist=process_list):
s = socket.socket(socket.AF_NETLINK,
socket.SOCK_DGRAM,
netlink.NETLINK_CONNECTOR)
# Netlink sockets are connected with pid and message group mask,
# message groups are for multicast protocols (like our process event
# connector).
try:
pec_bind(s)
except socket.error as sock_err:
        if sock_err.errno == errno.EPERM:
            raise Exception("You don't have permission to bind to the "
                            "process event connector. Try sudo.")
        raise
pec_control(s, listen=True)
while True:
(readable, w, e) = select([s],[],[])
buf = readable[0].recv(256)
event = pec_unpack(buf)
event["what"] = process_events_rev.get(event.what)
yield event
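# Minimal usage sketch (illustrative only; binding to the process event
# connector requires root, and this module is normally imported as part of its
# package because of the relative imports above):
#
#   register_process(process_name="bash", events=("PROC_EVENT_EXEC",))
#   for ev in pec_loop():
#       print(ev.what)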
|
the-stack_0_18335 | from datetime import date
import pytest
from freezegun import freeze_time
from datahub.investment.validate import (
_is_provided_and_is_date_in_the_past,
is_provided_and_is_date_less_than_a_year_ago,
)
@pytest.mark.parametrize(
'data_date,expected_result',
(
(
date(2019, 2, 2),
False,
),
(
date(2019, 2, 1),
True,
),
(
date(2019, 1, 31),
True,
),
(
None,
False,
),
),
)
@freeze_time('2019-02-01')
def test_is_date_in_the_past(data_date, expected_result):
"""Tests that a given date is in the past."""
assert _is_provided_and_is_date_in_the_past(data_date) is expected_result
@pytest.mark.parametrize(
'post_data,expected_result',
(
(
date(2019, 2, 1),
True,
),
(
date(2019, 2, 2),
False,
),
(
date(2019, 1, 31),
True,
),
(
date(2017, 9, 30),
False,
),
(
None,
False,
),
(
{},
False,
),
),
)
@freeze_time('2019-02-01')
def test_is_date_less_than_a_year_ago(post_data, expected_result):
"""Tests if a given date is within the last year."""
assert is_provided_and_is_date_less_than_a_year_ago(post_data) is expected_result
|
the-stack_0_18339 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.speech_v1p1beta1.types import cloud_speech
from google.longrunning import operations_pb2 # type: ignore
from .base import SpeechTransport, DEFAULT_CLIENT_INFO
class SpeechGrpcTransport(SpeechTransport):
"""gRPC backend transport for Speech.
Service that implements Google Cloud Speech API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "speech.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "speech.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def recognize(
self,
) -> Callable[[cloud_speech.RecognizeRequest], cloud_speech.RecognizeResponse]:
r"""Return a callable for the recognize method over gRPC.
Performs synchronous speech recognition: receive
results after all audio has been sent and processed.
Returns:
Callable[[~.RecognizeRequest],
~.RecognizeResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "recognize" not in self._stubs:
self._stubs["recognize"] = self.grpc_channel.unary_unary(
"/google.cloud.speech.v1p1beta1.Speech/Recognize",
request_serializer=cloud_speech.RecognizeRequest.serialize,
response_deserializer=cloud_speech.RecognizeResponse.deserialize,
)
return self._stubs["recognize"]
@property
def long_running_recognize(
self,
) -> Callable[[cloud_speech.LongRunningRecognizeRequest], operations_pb2.Operation]:
r"""Return a callable for the long running recognize method over gRPC.
Performs asynchronous speech recognition: receive results via
the google.longrunning.Operations interface. Returns either an
``Operation.error`` or an ``Operation.response`` which contains
a ``LongRunningRecognizeResponse`` message. For more information
on asynchronous speech recognition, see the
`how-to <https://cloud.google.com/speech-to-text/docs/async-recognize>`__.
Returns:
Callable[[~.LongRunningRecognizeRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "long_running_recognize" not in self._stubs:
self._stubs["long_running_recognize"] = self.grpc_channel.unary_unary(
"/google.cloud.speech.v1p1beta1.Speech/LongRunningRecognize",
request_serializer=cloud_speech.LongRunningRecognizeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["long_running_recognize"]
@property
def streaming_recognize(
self,
) -> Callable[
[cloud_speech.StreamingRecognizeRequest],
cloud_speech.StreamingRecognizeResponse,
]:
r"""Return a callable for the streaming recognize method over gRPC.
Performs bidirectional streaming speech recognition:
receive results while sending audio. This method is only
available via the gRPC API (not REST).
Returns:
Callable[[~.StreamingRecognizeRequest],
~.StreamingRecognizeResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "streaming_recognize" not in self._stubs:
self._stubs["streaming_recognize"] = self.grpc_channel.stream_stream(
"/google.cloud.speech.v1p1beta1.Speech/StreamingRecognize",
request_serializer=cloud_speech.StreamingRecognizeRequest.serialize,
response_deserializer=cloud_speech.StreamingRecognizeResponse.deserialize,
)
return self._stubs["streaming_recognize"]
def close(self):
self.grpc_channel.close()
__all__ = ("SpeechGrpcTransport",)
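# Usage sketch (illustrative, not part of the generated code): this transport is
# normally constructed indirectly, e.g. SpeechClient(transport="grpc"), but it
# can also be instantiated directly as
# SpeechGrpcTransport(host="speech.googleapis.com"), after which .grpc_channel
# and the stub properties (.recognize, etc.) expose the underlying channel and RPCs.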
|
the-stack_0_18342 | SETTING_FILENAME = 'filename'
SETTING_RECENT_FILES = 'recentFiles'
SETTING_WIN_SIZE = 'window/size'
SETTING_WIN_POSE = 'window/position'
SETTING_WIN_GEOMETRY = 'window/geometry'
SETTING_LINE_COLOR = 'line/color'
SETTING_FILL_COLOR = 'fill/color'
SETTING_ADVANCE_MODE = 'advanced'
SETTING_WIN_STATE = 'window/state'
SETTING_SAVE_DIR = 'savedir'
SETTING_PAINT_LABEL = 'paintlabel'
SETTING_LAST_OPEN_DIR = 'lastOpenDir'
SETTING_AUTO_SAVE = 'autosave'
SETTING_SINGLE_CLASS = 'singleclass'
FORMAT_PASCALVOC='PascalVOC'
FORMAT_YOLO='YOLO'
SETTING_DRAW_SQUARE = 'draw/square'
DEFAULT_ENCODING = 'utf-8'
SETTING_VIDEOFRAME_MODE = 'videoframe'
DEFAULT_PIXEL_RANDOM_X = 1
DEFAULT_PIXEL_RANDOM_Y = 1 |
the-stack_0_18347 | """Custom implementations of builtin types."""
from pytype import abstract
from pytype import abstract_utils
from pytype import function
from pytype import mixin
class TypeNew(abstract.PyTDFunction):
"""Implements type.__new__."""
def call(self, node, func, args):
if len(args.posargs) == 4:
self.match_args(node, args) # May raise FailedFunctionCall.
cls, name_var, bases_var, class_dict_var = args.posargs
try:
bases = list(abstract_utils.get_atomic_python_constant(bases_var))
if not bases:
bases = [self.vm.convert.object_type.to_variable(self.vm.root_node)]
node, variable = self.vm.make_class(
node, name_var, bases, class_dict_var, cls)
except abstract_utils.ConversionError:
pass
else:
return node, variable
elif (args.posargs and self.vm.callself_stack and
args.posargs[-1].data == self.vm.callself_stack[-1].data):
# We're calling type(self) in an __init__ method. A common pattern for
# making a class non-instantiable is:
# class Foo:
# def __init__(self):
# if type(self) is Foo:
# raise ...
# If we were to return 'Foo', pytype would think that this constructor
# can never return. The correct return type is something like
# TypeVar(bound=Foo), but we can't introduce a type parameter that isn't
# bound to a class or function, so we'll go with Any.
self.match_args(node, args) # May raise FailedFunctionCall.
return node, self.vm.new_unsolvable(node)
elif args.posargs and all(
v.full_name == "typing.Protocol" for v in args.posargs[-1].data):
# type(Protocol) is a _ProtocolMeta class that inherits from abc.ABCMeta.
# Changing the definition of Protocol in typing.pytd to include this
# metaclass causes a bunch of weird breakages, so we instead return the
# metaclass when type() or __class__ is accessed on Protocol. For
# simplicity, we pretend the metaclass is ABCMeta rather than a subclass.
self.match_args(node, args) # May raise FailedFunctionCall.
abc = self.vm.import_module("abc", "abc", 0).get_module("ABCMeta")
abc.load_lazy_attribute("ABCMeta")
return node, abc.members["ABCMeta"].AssignToNewVariable(node)
node, raw_ret = super().call(node, func, args)
# Removes TypeVars from the return value.
# See test_typevar.TypeVarTest.test_type_of_typevar(_error).
ret = self.vm.program.NewVariable()
for b in raw_ret.bindings:
value = self.vm.annotations_util.deformalize(b.data)
ret.AddBinding(value, {b}, node)
return node, ret
class BuiltinFunction(abstract.PyTDFunction):
"""Implementation of functions in builtins.pytd."""
name = None
@classmethod
def make(cls, vm):
assert cls.name
return super().make(cls.name, vm, "builtins")
def get_underlying_method(self, node, receiver, method_name):
"""Get the bound method that a built-in function delegates to."""
results = []
for b in receiver.bindings:
node, result = self.vm.attribute_handler.get_attribute(
node, b.data, method_name, valself=b)
if result is not None:
results.append(result)
if results:
return node, self.vm.join_variables(node, results)
else:
return node, None
def get_file_mode(sig, args):
callargs = {name: var for name, var, _ in sig.signature.iter_args(args)}
if "mode" in callargs:
return abstract_utils.get_atomic_python_constant(callargs["mode"])
else:
return ""
class Abs(BuiltinFunction):
"""Implements abs."""
name = "abs"
def call(self, node, _, args):
self.match_args(node, args)
arg = args.posargs[0]
node, fn = self.get_underlying_method(node, arg, "__abs__")
if fn is not None:
return self.vm.call_function(node, fn, function.Args(()))
else:
return node, self.vm.new_unsolvable(node)
class Next(BuiltinFunction):
"""Implements next."""
name = "next"
def _get_args(self, args):
arg = args.posargs[0]
if len(args.posargs) > 1:
default = args.posargs[1]
elif "default" in args.namedargs:
default = args.namedargs["default"]
else:
default = self.vm.program.NewVariable()
return arg, default
def call(self, node, _, args):
self.match_args(node, args)
arg, default = self._get_args(args)
node, fn = self.get_underlying_method(node, arg, self.vm.convert.next_attr)
if fn is not None:
node, ret = self.vm.call_function(node, fn, function.Args(()))
ret.PasteVariable(default)
return node, ret
else:
return node, self.vm.new_unsolvable(node)
class Filter(BuiltinFunction):
"""Implementation of filter(...)."""
name = "filter"
def _filter_pyval(self, data, node):
"""Filter None and False out of literal lists and tuples."""
if not isinstance(data, (abstract.List, abstract.Tuple)):
return None
remove = ([self.vm.convert.none], [self.vm.convert.false])
pyval = [x for x in data.pyval if x.data not in remove]
if len(pyval) < len(data.pyval):
return type(data)(pyval, data.vm).to_variable(node)
return None
def _filter_unions(self, data, node):
"""Remove None from any Union type parameters in data."""
param = data.cls.get_formal_type_parameter(abstract_utils.T)
if not param.isinstance_Union():
return None
new_opts = [x for x in param.options if x.name != "NoneType"]
if not new_opts:
return None
typ = self.vm.merge_values(new_opts)
cls = data.cls
params = {**cls.formal_type_parameters, abstract_utils.T: typ}
new_cls = type(cls)(cls.base_cls, params, cls.vm, cls.template)
return new_cls.instantiate(node)
def _filter_none(self, data, node):
if isinstance(data, abstract.Unsolvable):
return None
elif not data.cls:
return None
elif isinstance(data, mixin.PythonConstant):
return self._filter_pyval(data, node)
else:
return self._filter_unions(data, node)
return None
def call(self, node, func, args):
self.match_args(node, args)
if len(args.posargs) != 2:
return super().call(node, func, args)
pred, seq = args.posargs
# Special case filter(None, seq). We remove None from seq and then call the
    # regular filter() so we don't need to reimplement everything.
if pred.data == [self.vm.convert.none]:
result = self.vm.program.NewVariable()
for b in seq.bindings:
ret = self._filter_none(b.data, node)
if ret:
result.PasteVariable(ret, node, {b})
else:
result.PasteBinding(b)
args = function.Args((pred, result))
return super().call(node, func, args)
class ObjectPredicate(BuiltinFunction):
"""The base class for builtin predicates of the form f(obj, ...) -> bool.
Subclasses should implement run() for a specific signature.
(See UnaryPredicate and BinaryPredicate for examples.)
"""
def __init__(self, name, signatures, kind, vm):
super().__init__(name, signatures, kind, vm)
# Map of True/False/None (where None signals an ambiguous bool) to
# vm values.
self._vm_values = {
True: vm.convert.true,
False: vm.convert.false,
None: vm.convert.primitive_class_instances[bool],
}
def call(self, node, _, args):
try:
self.match_args(node, args)
node = node.ConnectNew(self.name)
result = self.vm.program.NewVariable()
self.run(node, args, result)
except function.InvalidParameters as ex:
self.vm.errorlog.invalid_function_call(self.vm.frames, ex)
result = self.vm.new_unsolvable(node)
return node, result
class UnaryPredicate(ObjectPredicate):
"""The base class for builtin predicates of the form f(obj).
Subclasses need to override the following:
_call_predicate(self, node, obj): The implementation of the predicate.
"""
def run(self, node, args, result):
for obj in args.posargs[0].bindings:
node, pyval = self._call_predicate(node, obj)
result.AddBinding(self._vm_values[pyval],
source_set=(obj,), where=node)
class BinaryPredicate(ObjectPredicate):
"""The base class for builtin predicates of the form f(obj, value).
Subclasses need to override the following:
_call_predicate(self, node, left, right): The implementation of the predicate.
"""
def run(self, node, args, result):
for left in args.posargs[0].bindings:
for right in args.posargs[1].bindings:
node, pyval = self._call_predicate(node, left, right)
result.AddBinding(self._vm_values[pyval],
source_set=(left, right), where=node)
class HasAttr(BinaryPredicate):
"""The hasattr() function."""
name = "hasattr"
def _call_predicate(self, node, left, right):
return self._has_attr(node, left.data, right.data)
def _has_attr(self, node, obj, attr):
"""Check if the object has attribute attr.
Args:
node: The given node.
obj: A BaseValue, generally the left hand side of a
hasattr() call.
attr: A BaseValue, generally the right hand side of a
hasattr() call.
Returns:
(node, result) where result = True if the object has attribute attr, False
if it does not, and None if it is ambiguous.
"""
if isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY):
return node, None
# If attr is not a literal constant, don't try to resolve it.
if (not isinstance(attr, mixin.PythonConstant) or
not isinstance(attr.pyval, str)):
return node, None
node, ret = self.vm.attribute_handler.get_attribute(node, obj, attr.pyval)
return node, ret is not None
def _flatten(value, classes):
"""Flatten the contents of value into classes.
If value is a Class, it is appended to classes.
If value is a PythonConstant of type tuple, then each element of the tuple
that has a single binding is also flattened.
Any other type of value, or tuple elements that have multiple bindings are
ignored.
Args:
value: An abstract value.
classes: A list to be modified.
Returns:
True iff a value was ignored during flattening.
"""
# Used by IsInstance and IsSubclass
if isinstance(value, abstract.AnnotationClass):
value = value.base_cls
if isinstance(value, mixin.Class):
# A single class, no ambiguity.
classes.append(value)
return False
elif isinstance(value, abstract.Tuple):
# A tuple, need to process each element.
ambiguous = False
for var in value.pyval:
if (len(var.bindings) != 1 or
_flatten(var.bindings[0].data, classes)):
# There were either multiple bindings or ambiguity deeper in the
# recursion.
ambiguous = True
return ambiguous
else:
return True
def _check_against_mro(vm, target, class_spec):
"""Check if any of the classes are in the target's MRO.
Args:
vm: The virtual machine.
target: A BaseValue whose MRO will be checked.
class_spec: A Class or PythonConstant tuple of classes (i.e. the second
argument to isinstance or issubclass).
Returns:
True if any class in classes is found in the target's MRO,
False if no match is found and None if it's ambiguous.
"""
# Determine the flattened list of classes to check.
classes = []
ambiguous = _flatten(class_spec, classes)
for c in classes:
if vm.matcher.match_from_mro(target, c, allow_compat_builtins=False):
return True # A definite match.
# No matches, return result depends on whether _flatten() was
# ambiguous.
return None if ambiguous else False
class IsInstance(BinaryPredicate):
"""The isinstance() function."""
name = "isinstance"
def _call_predicate(self, node, left, right):
return node, self._is_instance(left.data, right.data)
def _is_instance(self, obj, class_spec):
"""Check if the object matches a class specification.
Args:
obj: A BaseValue, generally the left hand side of an
isinstance() call.
class_spec: A BaseValue, generally the right hand side of an
isinstance() call.
Returns:
True if the object is derived from a class in the class_spec, False if
it is not, and None if it is ambiguous whether obj matches class_spec.
"""
cls = obj.get_class()
if (isinstance(obj, abstract.AMBIGUOUS_OR_EMPTY) or cls is None or
isinstance(cls, abstract.AMBIGUOUS_OR_EMPTY)):
return None
return _check_against_mro(self.vm, cls, class_spec)
class IsSubclass(BinaryPredicate):
"""The issubclass() function."""
name = "issubclass"
def _call_predicate(self, node, left, right):
return node, self._is_subclass(left.data, right.data)
def _is_subclass(self, cls, class_spec):
"""Check if the given class is a subclass of a class specification.
Args:
cls: A BaseValue, the first argument to an issubclass call.
class_spec: A BaseValue, the second issubclass argument.
Returns:
True if the class is a subclass (or is a class) in the class_spec, False
if not, and None if it is ambiguous.
"""
if isinstance(cls, abstract.AMBIGUOUS_OR_EMPTY):
return None
return _check_against_mro(self.vm, cls, class_spec)
class IsCallable(UnaryPredicate):
"""The callable() function."""
name = "callable"
def _call_predicate(self, node, obj):
return self._is_callable(node, obj)
def _is_callable(self, node, obj):
"""Check if the object is callable.
Args:
node: The given node.
obj: A BaseValue, the arg of a callable() call.
Returns:
(node, result) where result = True if the object is callable,
False if it is not, and None if it is ambiguous.
"""
# NOTE: This duplicates logic in the matcher; if this function gets any
# longer consider calling matcher._match_value_against_type(obj,
# convert.callable) instead.
val = obj.data
if isinstance(val, abstract.AMBIGUOUS_OR_EMPTY):
return node, None
# Classes are always callable.
if isinstance(val, mixin.Class):
return node, True
# Otherwise, see if the object has a __call__ method.
node, ret = self.vm.attribute_handler.get_attribute(
node, val, "__call__", valself=obj)
return node, ret is not None
class BuiltinClass(abstract.PyTDClass):
"""Implementation of classes in builtins.pytd.
  The module name is passed in to allow classes in other modules to subclass a
  class in builtins and inherit the custom behaviour.
"""
def __init__(self, vm, name, module="builtins"):
if module == "builtins":
pytd_cls = vm.lookup_builtin("builtins.%s" % name)
else:
ast = vm.loader.import_name(module)
pytd_cls = ast.Lookup("%s.%s" % (module, name))
super().__init__(name, pytd_cls, vm)
self.module = module
class SuperInstance(abstract.BaseValue):
"""The result of a super() call, i.e., a lookup proxy."""
def __init__(self, cls, obj, vm):
super().__init__("super", vm)
self.cls = self.vm.convert.super_type
self.super_cls = cls
self.super_obj = obj
self.get = abstract.NativeFunction("__get__", self.get, self.vm)
def get(self, node, *unused_args, **unused_kwargs):
return node, self.to_variable(node)
def _get_descriptor_from_superclass(self, node, cls):
obj = cls.instantiate(node)
ret = []
for b in obj.bindings:
_, attr = self.vm.attribute_handler.get_attribute(
node, b.data, "__get__", valself=b)
if attr:
ret.append(attr)
if ret:
return self.vm.join_variables(node, ret)
return None
def get_special_attribute(self, node, name, valself):
if name == "__get__":
for cls in self.super_cls.mro[1:]:
attr = self._get_descriptor_from_superclass(node, cls)
if attr:
return attr
# If we have not successfully called __get__ on an instance of the
# superclass, fall back to returning self.
return self.get.to_variable(node)
else:
return super().get_special_attribute(node, name, valself)
def get_class(self):
return self.cls
def call(self, node, _, args):
self.vm.errorlog.not_callable(self.vm.frames, self)
return node, self.vm.new_unsolvable(node)
class Super(BuiltinClass):
"""The super() function. Calling it will create a SuperInstance."""
# Minimal signature, only used for constructing exceptions.
_SIGNATURE = function.Signature.from_param_names("super", ("cls", "self"))
def __init__(self, vm):
super().__init__(vm, "super")
def call(self, node, _, args):
result = self.vm.program.NewVariable()
num_args = len(args.posargs)
if num_args == 0 and self.vm.PY3:
# The implicit type argument is available in a freevar named '__class__'.
cls_var = None
# If we are in a list comprehension we want the enclosing frame.
index = -1
while self.vm.frames[index].f_code.co_name == "<listcomp>":
index -= 1
frame = self.vm.frames[index]
for i, free_var in enumerate(frame.f_code.co_freevars):
if free_var == abstract.BuildClass.CLOSURE_NAME:
cls_var = frame.cells[len(frame.f_code.co_cellvars) + i]
break
if not (cls_var and cls_var.bindings):
self.vm.errorlog.invalid_super_call(
self.vm.frames, message="Missing __class__ closure for super call.",
details="Is 'super' being called from a method defined in a class?")
return node, self.vm.new_unsolvable(node)
# The implicit super object argument is the first positional argument to
# the function calling 'super'.
self_arg = frame.first_posarg
if not self_arg:
self.vm.errorlog.invalid_super_call(
self.vm.frames, message="Missing 'self' argument to 'super' call.")
return node, self.vm.new_unsolvable(node)
super_objects = self_arg.bindings
elif 1 <= num_args <= 2:
cls_var = args.posargs[0]
super_objects = args.posargs[1].bindings if num_args == 2 else [None]
else:
raise function.WrongArgCount(self._SIGNATURE, args, self.vm)
for cls in cls_var.bindings:
if not isinstance(cls.data, (mixin.Class, abstract.AMBIGUOUS_OR_EMPTY)):
bad = function.BadParam(
name="cls", expected=self.vm.convert.type_type)
raise function.WrongArgTypes(
self._SIGNATURE, args, self.vm, bad_param=bad)
for obj in super_objects:
if obj:
result.AddBinding(
SuperInstance(cls.data, obj.data, self.vm), [cls, obj], node)
else:
result.AddBinding(
SuperInstance(cls.data, None, self.vm), [cls], node)
return node, result
class Object(BuiltinClass):
"""Implementation of builtins.object."""
def __init__(self, vm):
super().__init__(vm, "object")
def is_object_new(self, func):
"""Whether the given function is object.__new__.
Args:
func: A function.
Returns:
True if func equals either of the pytd definitions for object.__new__,
False otherwise.
"""
self.load_lazy_attribute("__new__")
self.load_lazy_attribute("__new__extra_args")
return ([func] == self.members["__new__"].data or
[func] == self.members["__new__extra_args"].data)
def _has_own(self, node, cls, method):
"""Whether a class has its own implementation of a particular method.
Args:
node: The current node.
cls: A mixin.Class.
method: The method name. So that we don't have to handle the cases when
the method doesn't exist, we only support "__new__" and "__init__".
Returns:
True if the class's definition of the method is different from the
definition in builtins.object, False otherwise.
"""
assert method in ("__new__", "__init__")
if not isinstance(cls, mixin.Class):
return False
self.load_lazy_attribute(method)
obj_method = self.members[method]
_, cls_method = self.vm.attribute_handler.get_attribute(node, cls, method)
return obj_method.data != cls_method.data
def get_special_attribute(self, node, name, valself):
# Based on the definitions of object_init and object_new in
# cpython/Objects/typeobject.c (https://goo.gl/bTEBRt). It is legal to pass
# extra arguments to object.__new__ if the calling class overrides
# object.__init__, and vice versa.
if valself and not abstract_utils.equivalent_to(valself, self):
val = valself.data
if name == "__new__" and self._has_own(node, val, "__init__"):
self.load_lazy_attribute("__new__extra_args")
return self.members["__new__extra_args"]
elif (name == "__init__" and isinstance(val, abstract.Instance) and
self._has_own(node, val.cls, "__new__")):
self.load_lazy_attribute("__init__extra_args")
return self.members["__init__extra_args"]
return super().get_special_attribute(node, name, valself)
class RevealType(abstract.BaseValue):
"""For debugging. reveal_type(x) prints the type of "x"."""
def __init__(self, vm):
super().__init__("reveal_type", vm)
def call(self, node, _, args):
for a in args.posargs:
self.vm.errorlog.reveal_type(self.vm.frames, node, a)
return node, self.vm.convert.build_none(node)
class PropertyTemplate(BuiltinClass):
"""Template for property decorators."""
_KEYS = ["fget", "fset", "fdel", "doc"]
def __init__(self, vm, name, module="builtins"): # pylint: disable=useless-super-delegation
super().__init__(vm, name, module)
def signature(self):
# Minimal signature, only used for constructing exceptions.
return function.Signature.from_param_names(self.name, tuple(self._KEYS))
def _get_args(self, args):
ret = dict(zip(self._KEYS, args.posargs))
for k, v in args.namedargs.iteritems():
if k not in self._KEYS:
raise function.WrongKeywordArgs(self.signature(), args, self.vm, [k])
ret[k] = v
return ret
def call(self, node, funcv, args):
raise NotImplementedError()
class PropertyInstance(abstract.SimpleValue, mixin.HasSlots):
"""Property instance (constructed by Property.call())."""
def __init__(self, vm, name, cls, fget=None, fset=None, fdel=None, doc=None):
super().__init__("property", vm)
mixin.HasSlots.init_mixin(self)
self.name = name # Reports the correct decorator in error messages.
self.fget = fget
self.fset = fset
self.fdel = fdel
self.doc = doc
self.cls = cls
self.set_slot("__get__", self.fget_slot)
self.set_slot("__set__", self.fset_slot)
self.set_slot("__delete__", self.fdelete_slot)
self.set_slot("getter", self.getter_slot)
self.set_slot("setter", self.setter_slot)
self.set_slot("deleter", self.deleter_slot)
self.is_abstract = any(self._is_fn_abstract(x) for x in [fget, fset, fdel])
def _is_fn_abstract(self, func_var):
if func_var is None:
return False
return any(getattr(d, "is_abstract", None) for d in func_var.data)
def get_class(self):
return self.cls
def fget_slot(self, node, obj, objtype):
return self.vm.call_function(node, self.fget, function.Args((obj,)))
def fset_slot(self, node, obj, value):
return self.vm.call_function(
node, self.fset, function.Args((obj, value)))
def fdelete_slot(self, node, obj):
return self.vm.call_function(
node, self.fdel, function.Args((obj,)))
def getter_slot(self, node, fget):
prop = PropertyInstance(
self.vm, self.name, self.cls, fget, self.fset, self.fdel, self.doc)
result = self.vm.program.NewVariable([prop], fget.bindings, node)
return node, result
def setter_slot(self, node, fset):
prop = PropertyInstance(
self.vm, self.name, self.cls, self.fget, fset, self.fdel, self.doc)
result = self.vm.program.NewVariable([prop], fset.bindings, node)
return node, result
def deleter_slot(self, node, fdel):
prop = PropertyInstance(
self.vm, self.name, self.cls, self.fget, self.fset, fdel, self.doc)
result = self.vm.program.NewVariable([prop], fdel.bindings, node)
return node, result
def isinstance_PropertyInstance(self):
return True
class Property(PropertyTemplate):
"""Property method decorator."""
def __init__(self, vm):
super().__init__(vm, "property")
def call(self, node, funcv, args):
property_args = self._get_args(args)
return node, PropertyInstance(
self.vm, "property", self, **property_args).to_variable(node)
class StaticMethodInstance(abstract.SimpleValue, mixin.HasSlots):
"""StaticMethod instance (constructed by StaticMethod.call())."""
def __init__(self, vm, cls, func):
super().__init__("staticmethod", vm)
mixin.HasSlots.init_mixin(self)
self.func = func
self.cls = cls
self.set_slot("__get__", self.func_slot)
def get_class(self):
return self.cls
def func_slot(self, node, obj, objtype):
return node, self.func
def isinstance_StaticMethodInstance(self):
return True
class StaticMethod(BuiltinClass):
"""Static method decorator."""
# Minimal signature, only used for constructing exceptions.
_SIGNATURE = function.Signature.from_param_names("staticmethod", ("func",))
def __init__(self, vm):
super().__init__(vm, "staticmethod")
def call(self, node, funcv, args):
if len(args.posargs) != 1:
raise function.WrongArgCount(self._SIGNATURE, args, self.vm)
arg = args.posargs[0]
return node, StaticMethodInstance(self.vm, self, arg).to_variable(node)
class ClassMethodCallable(abstract.BoundFunction):
"""Tag a ClassMethod bound function so we can dispatch on it."""
class ClassMethodInstance(abstract.SimpleValue, mixin.HasSlots):
"""ClassMethod instance (constructed by ClassMethod.call())."""
def __init__(self, vm, cls, func):
super().__init__("classmethod", vm)
mixin.HasSlots.init_mixin(self)
self.cls = cls
self.func = func
self.set_slot("__get__", self.func_slot)
def get_class(self):
return self.cls
def func_slot(self, node, obj, objtype):
results = [ClassMethodCallable(objtype, b.data) for b in self.func.bindings]
return node, self.vm.program.NewVariable(results, [], node)
def isinstance_ClassMethodInstance(self):
return True
class ClassMethod(BuiltinClass):
"""Static method decorator."""
# Minimal signature, only used for constructing exceptions.
_SIGNATURE = function.Signature.from_param_names("classmethod", ("func",))
def __init__(self, vm):
super().__init__(vm, "classmethod")
def call(self, node, funcv, args):
if len(args.posargs) != 1:
raise function.WrongArgCount(self._SIGNATURE, args, self.vm)
arg = args.posargs[0]
for d in arg.data:
d.is_classmethod = True
return node, ClassMethodInstance(self.vm, self, arg).to_variable(node)
|
the-stack_0_18348 | import matplotlib as mil
import tensorflow as tf
from matplotlib import pyplot
fig = pyplot.gcf()
fig.set_size_inches(4, 4)
sess = tf.InteractiveSession()
image_filename = "/home/ubuntu/Downloads/n02107142_16917.jpg"
filename_queue = tf.train.string_input_producer([image_filename]) # list of files to read
reader = tf.WholeFileReader()
try:
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
print(image)
except Exception as e:
print(e)
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
image_batch = tf.image.convert_image_dtype(tf.expand_dims(image, 0), tf.float32, saturate=False)
# In[8]:
kernel = tf.constant([
[
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
],
[
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
[[ 8., 0., 0.], [ 0., 8., 0.], [ 0., 0., 8.]],
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
],
[
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]],
[[ -1., 0., 0.], [ 0., -1., 0.], [ 0., 0., -1.]]
]
])
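# The 3x3x3x3 constant above is a Laplacian-style edge-detection kernel applied
# per RGB channel: each 3x3 tap is a scaled identity over the channel axis, with
# the centre weighted 8 and every neighbour -1, so flat colour regions cancel to
# zero while edges are amplified.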
conv2d = tf.nn.conv2d(image_batch, kernel, [1, 1, 1, 1], padding="SAME")
activation_map = sess.run(tf.minimum(tf.nn.relu(conv2d), 255))
fig = pyplot.gcf()
pyplot.imshow(activation_map[0], interpolation='nearest')
fig.set_size_inches(4, 4)
fig.savefig("./example-edge-detection.png")
#pyplot.show() |
the-stack_0_18349 | from js9 import j
base = j.tools.prefab._getBaseClass()
class Prefabjs8Core(base):
def install(self, reset=False, deps=True, branch='8.2.0', keep=False):
if not reset and self.doneGet("install"):
return
if reset:
self.prefab.system.package.ensure('psmisc')
for process in ['mongodb', 'redis', 'redis-server', 'ardb-server', 'tmux']:
self.prefab.core.run('killall %s' % process, die=False)
C = """
rm -f $TMPDIR/jsexecutor*
rm -f $TMPDIR/jsinstall*
rm -rf $TMPDIR/actions*
set -ex
rm -rf /opt/*
"""
self.prefab.core.run(C, die=False)
if branch != "master":
C = """
set -ex
apt install curl -y
cd $TMPDIR
rm -f install.sh
export JSBRANCH="$branch"
curl -k https://raw.githubusercontent.com/Jumpscale/jumpscale_core9/$JSBRANCH/install/install.sh?$RANDOM > install.sh
bash install.sh
"""
C = C.replace("$branch", branch)
self.prefab.core.run(C)
else:
C = """
set -ex
apt install curl -y
cd $TMPDIR
rm -f install.sh
curl -k https://raw.githubusercontent.com/Jumpscale/jumpscale_core9/master/install/install.sh?$RANDOM > install.sh
bash install.sh
"""
self.prefab.core.run(C)
self.doneSet("install")
# should not do this, is otherwise different than the std install
# def installDeps(self):
#
# self.prefab.system.base.install()
# self.prefab.runtimes.python.install()
# self.prefab.runtimes.pip.ensure()
# self.prefab.apps.redis.install()
# self.prefab.lib.brotli.build()
# self.prefab.lib.brotli.install()
#
# self.prefab.runtimes.pip.install('pytoml')
# self.prefab.runtimes.pip.install('pygo')
# self.prefab.system.package.ensure('libxml2-dev')
# self.prefab.system.package.ensure('libxslt1-dev')
#
# # python etcd
# C = """
# cd $TMPDIR/
# git clone https://github.com/jplana/python-etcd.git
# cd python-etcd
# python3 setup.py install
# """
# C = self.replace(C)
# self.prefab.core.run(C)
#
# # gevent
# C = """
# pip3 install 'cython>=0.23.4' git+git://github.com/gevent/gevent.git#egg=gevent
# """
# self.prefab.core.run(C)
#
# C = """
# # cffi==1.5.2
# cffi
# paramiko
#
# msgpack-python
# redis
# #credis
# aioredis
#
# mongoengine==0.10.6
#
# certifi
# docker-py
# http://carey.geek.nz/code/python-fcrypt/fcrypt-1.3.1.tar.gz
#
# gitlab3
# gitpython
# html2text
#
# # pysqlite
# click
# influxdb
# ipdb
# ipython --upgrade
# jinja2
# netaddr
# wtforms_json
#
# reparted
# pytoml
# pystache
# pymongo
# psycopg2
# pathtools
# psutil
#
# pytz
# requests
# sqlalchemy
# urllib3
# zmq
# pyyaml
# python-etcd
# websocket
# marisa-trie
# pylzma
# ujson
# watchdog
# pygo
# pygithub
# minio
#
# # colorlog
# colored-traceback
# #pygments
# tmuxp
#
# ply
# xonsh
# pudb
#
# traitlets
# python-telegram-bot
# colorlog
# path.py
# dnspython3
# packet-python
# gspread
# oauth2client
# crontab
# beautifulsoup4
# lxml
# pycapnp
# """
# self.prefab.runtimes.pip.multiInstall(C, upgrade=True)
#
# # snappy install
# self.prefab.system.package.ensure('libsnappy-dev')
# self.prefab.system.package.ensure('libsnappy1v5')
# self.prefab.runtimes.pip.install('python-snappy')
#
# if self.prefab.platformtype.osname != "debian":
# C = """
# blosc
# bcrypt
# """
# self.prefab.runtimes.pip.multiInstall(C, upgrade=True)
|
the-stack_0_18350 | import boto3
import os
import json
import logging
import uuid
from urllib.parse import unquote_plus
from sitewise_integration_points import SitewiseIntegrationPoints
logger = logging.getLogger()
logger.setLevel(getattr(logging, os.environ.get("LOG_LEVEL", "INFO").upper()))
integration_points_dynamo = SitewiseIntegrationPoints(os.environ['DYNAMO_INTEGRATION_POINTS_TABLE_NAME'], os.environ['AWS_REGION'])
def cache_integration_points(messages):
operators = [message for message in messages if message.get('type') == 'lifecycle' and message.get('reading', {}).get('et') == 'operator_updated']
for operator in operators:
integration_points_dynamo.save(operator)
def send_sqs_messages(messages, queue_url=os.environ['LIFECICLE_EVENTS_QUEUE_URL'], batch_size=int(os.environ.get("BATCH_SIZE", 10))):
sqs_client = boto3.client('sqs', region_name=os.environ.get("AWS_REGION"))
for i in range(0, len(messages), batch_size):
messages_chunk = list(messages)[i:i + batch_size]
entries = [{'Id': str(idx), 'MessageBody': json.dumps(msg), 'MessageGroupId': 'lifecycle_events'} for idx,msg in enumerate(messages_chunk)]
response = sqs_client.send_message_batch(
QueueUrl=queue_url,
Entries=entries
)
logger.info(f"Successfully {response}")
def handler(event, context):
sqs_messages = []
s3_client = boto3.client('s3', region_name=os.environ.get("AWS_REGION"))
for record in event['Records']:
bucket = record['s3']['bucket']['name']
key = unquote_plus(record['s3']['object']['key'])
tmpkey = key.replace('/', '')
download_path = '/tmp/{}{}'.format(uuid.uuid4(), tmpkey)
s3_client.download_file(bucket, key, download_path)
with open(download_path) as json_file:
data = json.load(json_file)
sqs_messages += data.get('assetModels', [])
sqs_messages += data.get('assets', [])
cache_integration_points(sqs_messages)
send_sqs_messages(sqs_messages)
if __name__ == '__main__':
event = {
"Records": [
{
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "testConfigRule",
"bucket": {
"name": "ricostg-uploads-sydney",
"ownerIdentity": {
"principalId": "EXAMPLE"
},
"arn": "arn:aws:s3:::example-bucket"
},
"object": {
"key": "sitewise/test/Operator_mkGzglkBFEl7o8PCkRgx9A.json",
"size": 1024,
"eTag": "0123456789abcdef0123456789abcdef",
"sequencer": "0A1B2C3D4E5F678901"
}
}
}
]
}
handler(event, None)
|
the-stack_0_18355 | """empty message
Revision ID: 4d139412cd47
Revises: 352abb027016
Create Date: 2022-03-24 11:01:45.925852
"""
from alembic import op
import sqlalchemy as sa # Noqa F401
# revision identifiers, used by Alembic.
revision = "4d139412cd47"
down_revision = "352abb027016"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_clients_phone", table_name="clients")
op.create_index(op.f("ix_clients_phone"), "clients", ["phone"], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_clients_phone"), table_name="clients")
op.create_index("ix_clients_phone", "clients", ["phone"], unique=False)
# ### end Alembic commands ###
|
the-stack_0_18360 | from typing import Dict, List
from xstate.state_node import StateNode
from xstate.state import State
from xstate.algorithm import enter_states, get_state_value, main_event_loop
from xstate.event import Event
class Machine:
root: StateNode
_id_map: Dict[str, StateNode]
def __init__(self, config):
self.id = config["id"]
self._id_map = {}
self.root = StateNode(
config, machine=self, key=config.get("id", "(machine)"), parent=None
)
self.states = self.root.states
def transition(self, state: State, event: str):
(configuration, actions) = main_event_loop(self, state, Event(event))
value = get_state_value(self.root, configuration=configuration)
return State(configuration=configuration, context={}, actions=actions)
def state_from(self, state_value) -> State:
configuration = self._get_configuration(state_value=state_value)
return State(configuration=configuration, context=None)
def _register(self, state_node: StateNode):
state_node.machine = self
self._id_map[state_node.id] = state_node
def _get_by_id(self, id: str) -> StateNode:
return self._id_map.get(id, None)
def _get_configuration(self, state_value, parent=None) -> List[StateNode]:
if parent is None:
parent = self.root
if isinstance(state_value, str):
state_node = parent.states.get(state_value, None)
if state_node is None:
raise ValueError(f"State node {state_value} is missing")
return [state_node]
configuration = []
for key in state_value.keys():
state_node = parent.states.get(key)
configuration.append(state_node)
configuration += self._get_configuration(
state_value.get(key), parent=state_node
)
return configuration
@property
def initial_state(self) -> State:
(configuration, actions, internal_queue) = enter_states(
[self.root.initial],
configuration=set(),
states_to_invoke=set(),
history_value={},
actions=[],
internal_queue=[],
)
return State(configuration=configuration, context={}, actions=actions)
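# Minimal usage sketch (not part of the original module; assumes a statechart
# config in the xstate JSON shape with "id", "initial", "states" and "on" keys):
#
#   lights = Machine({
#       "id": "lights",
#       "initial": "green",
#       "states": {
#           "green": {"on": {"TIMER": "yellow"}},
#           "yellow": {"on": {"TIMER": "red"}},
#           "red": {"on": {"TIMER": "green"}},
#       },
#   })
#   state = lights.initial_state
#   state = lights.transition(state, "TIMER")  # now in the "yellow" state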
|
the-stack_0_18365 | import builtins
import os
from rich.repr import RichReprResult
import sys
from array import array
from collections import Counter, defaultdict, deque, UserDict, UserList
import dataclasses
from dataclasses import dataclass, fields, is_dataclass
from inspect import isclass
from itertools import islice
import re
from typing import (
DefaultDict,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from types import MappingProxyType
try:
import attr as _attr_module
except ImportError: # pragma: no cover
_attr_module = None # type: ignore
from .highlighter import ReprHighlighter
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
# Matches Jupyter's special methods
_re_jupyter_repr = re.compile(r"^_repr_.+_$")
def _is_attr_object(obj: Any) -> bool:
"""Check if an object was created with attrs module."""
return _attr_module is not None and _attr_module.has(type(obj))
def _get_attr_fields(obj: Any) -> Iterable["_attr_module.Attribute[Any]"]:
"""Get fields for an attrs object."""
return _attr_module.fields(type(obj)) if _attr_module is not None else []
def _is_dataclass_repr(obj: object) -> bool:
"""Check if an instance of a dataclass contains the default repr.
Args:
obj (object): A dataclass instance.
Returns:
bool: True if the default repr is used, False if there is a custom repr.
"""
# Digging in to a lot of internals here
# Catching all exceptions in case something is missing on a non CPython implementation
try:
return obj.__repr__.__code__.co_filename == dataclasses.__file__
except Exception:
return False
def install(
console: Optional["Console"] = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
# on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(
_re_jupyter_repr.match(attr) for attr in dir(value)
):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
new_line_start=True,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
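# Example (illustrative): after install() the REPL displayhook pretty prints
# bare expressions automatically.
#
#   >>> from rich import pretty
#   >>> pretty.install()
#   >>> {"name": "rich", "features": ["pretty", "console", "tables"]}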
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from the width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: Optional["HighlighterType"] = None,
*,
indent_size: int = 4,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
UserDict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
UserList: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj))
or (hasattr(obj, "__rich_repr__"))
or _is_attr_object(obj)
) and not isclass(obj)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
separator: str = ", "
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield self.separator
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
parent: Optional["_Line"] = None
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
last: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
new_line = yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for last, child in loop_last(node.children):
separator = "," if tuple_of_one else node.separator
line = _Line(
parent=new_line,
node=child,
whitespace=child_whitespace,
suffix=separator,
last=last and not tuple_of_one,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix=self.suffix,
last=self.last,
)
def __str__(self) -> str:
if self.last:
return f"{self.whitespace}{self.text}{self.node or ''}"
else:
return (
f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
)
def traverse(
_object: Any, max_length: Optional[int] = None, max_string: Optional[int] = None
) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error {str(error)!r}>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
try:
fake_attributes = hasattr(
obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
)
except Exception:
fake_attributes = False
rich_repr_result: Optional[RichReprResult] = None
if not fake_attributes:
try:
if hasattr(obj, "__rich_repr__") and not isclass(obj):
rich_repr_result = obj.__rich_repr__()
except Exception:
pass
if rich_repr_result is not None:
angular = getattr(obj.__rich_repr__, "angular", False)
args = list(iter_rich_args(rich_repr_result))
class_name = obj.__class__.__name__
if args:
children = []
append = children.append
if angular:
node = Node(
open_brace=f"<{class_name} ",
close_brace=">",
children=children,
last=root,
separator=" ",
)
else:
node = Node(
open_brace=f"{class_name}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"<{class_name}>" if angular else f"{class_name}()",
children=[],
last=root,
)
elif _is_attr_object(obj) and not fake_attributes:
children = []
append = children.append
attr_fields = _get_attr_fields(obj)
if attr_fields:
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
def iter_attrs() -> Iterable[
Tuple[str, Any, Optional[Callable[[Any], str]]]
]:
"""Iterate over attr fields and values."""
for attr in attr_fields:
if attr.repr:
try:
value = getattr(obj, attr.name)
except Exception as error:
# Can happen, albeit rarely
yield (attr.name, error, None)
else:
yield (
attr.name,
value,
attr.repr if callable(attr.repr) else None,
)
for last, (name, value, repr_callable) in loop_last(iter_attrs()):
if repr_callable:
child_node = Node(value_repr=str(repr_callable(value)))
else:
child_node = _traverse(value)
child_node.last = last
child_node.key_repr = name
child_node.key_separator = "="
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and not fake_attributes
and (_is_dataclass_repr(obj) or py_version == (3, 6))
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif isinstance(obj, _CONTAINERS):
for container_type in _CONTAINERS:
if isinstance(obj, container_type):
obj_type = container_type
break
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj_type.__repr__ != type(obj).__repr__:
node = Node(value_repr=to_repr(obj), last=root)
elif obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
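# Example (illustrative): a container whose one-line repr would exceed
# max_width is expanded onto new lines, one child per line.
#
#   >>> print(pretty_repr({"numbers": [1, 2, 3], "letters": ["a", "b"]}, max_width=20))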
def pprint(
_object: Any,
*,
console: Optional["Console"] = None,
indent_guides: bool = True,
max_length: Optional[int] = None,
max_string: Optional[int] = None,
expand_all: bool = False,
) -> None:
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self) -> str:
1 / 0
return "this will fail"
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
|
the-stack_0_18366 | #!/usr/bin/env python
# Copyright 2016-2019 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.utils import check_random_state
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from WORC.classification.SearchCV import RandomizedSearchCVfastr, RandomizedSearchCVJoblib
def random_search_parameters(features, labels, N_iter, test_size,
param_grid, scoring_method,
n_jobspercore=200, use_fastr=False,
n_cores=1, fastr_plugin=None):
"""
Train a classifier and simultaneously optimizes hyperparameters using a
randomized search.
Arguments:
features: numpy array containing the training features.
labels: list containing the object labels to be trained on.
N_iter: integer listing the number of iterations to be used in the
hyperparameter optimization.
test_size: float listing the test size percentage used in the cross
validation.
        param_grid: dictionary containing all possible hyperparameters and their
            values or distributions.
        scoring_method: string defining the scoring method used in optimization,
            e.g. f1_weighted for an SVM.
        n_jobspercore: integer listing the number of jobs that are run on a
            single core when using the fastr randomized search.
        use_fastr: Boolean determining whether fastr or joblib should be used
            for the optimization.
fastr_plugin: determines which plugin is used for fastr executions.
When None, uses the default plugin from the fastr config.
Returns:
random_search: sklearn randomsearch object containing the results.
"""
random_seed = np.random.randint(1, 5000)
random_state = check_random_state(random_seed)
regressors = ['SVR', 'RFR', 'SGDR', 'Lasso', 'ElasticNet']
if any(clf in regressors for clf in param_grid['classifiers']):
# We cannot do a stratified shuffle split with regression
cv = ShuffleSplit(n_splits=5, test_size=test_size,
random_state=random_state)
else:
cv = StratifiedShuffleSplit(n_splits=5, test_size=test_size,
random_state=random_state)
if use_fastr:
random_search = RandomizedSearchCVfastr(param_distributions=param_grid,
n_iter=N_iter,
scoring=scoring_method,
n_jobs=n_cores,
n_jobspercore=n_jobspercore,
verbose=1, cv=cv,
fastr_plugin=fastr_plugin)
else:
random_search = RandomizedSearchCVJoblib(param_distributions=param_grid,
n_iter=N_iter,
scoring=scoring_method,
n_jobs=n_cores,
verbose=1, cv=cv)
random_search.fit(features, labels)
print("Best found parameters:")
for i in random_search.best_params_:
print(f'{i}: {random_search.best_params_[i]}.')
print("\n Best score using best parameters:")
print(random_search.best_score_)
return random_search
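# Minimal usage sketch (illustrative; the exact param_grid contents are an
# assumption and depend on the surrounding WORC configuration):
#
#   param_grid = {'classifiers': ['SVM'], 'max_iter': [100000]}
#   search = random_search_parameters(features, labels, N_iter=25,
#                                     test_size=0.2, param_grid=param_grid,
#                                     scoring_method='f1_weighted')
#   best_settings = search.best_params_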
|
the-stack_0_18367 | from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from .views import (
CancelView,
ChangeCardView,
ChangePlanView,
HistoryView,
SubscribeView
)
urlpatterns = patterns(
"payments.views",
url(r"^webhook/$", "webhook", name="payments_webhook"),
url(r"^a/subscribe/$", "subscribe", name="payments_ajax_subscribe"),
url(r"^a/change/card/$", "change_card", name="payments_ajax_change_card"),
url(r"^a/change/plan/$", "change_plan", name="payments_ajax_change_plan"),
url(r"^a/cancel/$", "cancel", name="payments_ajax_cancel"),
url(
r"^subscribe/$",
login_required(SubscribeView.as_view()),
name="payments_subscribe"
),
url(
r"^change/card/$",
login_required(ChangeCardView.as_view()),
name="payments_change_card"
),
url(
r"^change/plan/$",
login_required(ChangePlanView.as_view()),
name="payments_change_plan"
),
url(
r"^cancel/$",
login_required(CancelView.as_view()),
name="payments_cancel"
),
url(
r"^history/$",
login_required(HistoryView.as_view()),
name="payments_history"
),
)
|
the-stack_0_18370 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: saveformats.py
#
# Tests: save window formats
#
# Programmer: Mark C. Miller
# Date: September 20, 2005
#
# Modifications:
# Jeremy Meredith, Mon Apr 23 14:07:35 EDT 2007
# Don't create a mesh plot when we're saving geometry formats, since
# the new behavior is to separate plots into different files (as they
# typically have different variables and geometry types).
#
# Mark C. Miller, Wed Jan 20 07:37:11 PST 2010
# Added ability to swtich between Silo's HDF5 and PDB data.
#
# Cyrus Harrison, Tue Feb 2 10:55:43 PST 2010
# Fixed problem w/ setting active window that allowed errors to propagate
# between test cases.
#
# Mark C. Miller, Wed Apr 7 19:02:29 PDT 2010
# Be smarter about testing curve formats while in scalable mode.
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Function: FileExists
#
# Purpose:
# Tests if a file exists and, if the file is being written, waits
# until the file size does not change for growthInterval seconds. If the
# file exists but is of zero size, that is the same as it NOT existing
#
# waitToAppear: number of seconds to wait for the file to first appear
#  growthInterval: number of seconds between successive stats on the file
#
# Programmer: Mark C. Miller
# September 20, 2005
#
# ----------------------------------------------------------------------------
# os/time/ST_SIZE are normally provided by the VisIt test harness; import them
# explicitly so the helpers below are self-contained.
import os, time
from stat import ST_SIZE
def FileExists(name, waitToAppear, growthInterval):
if os.path.isfile(name) == 0:
time.sleep(waitToAppear)
if os.path.isfile(name) == 0:
return 0
curSize = os.stat(name)[ST_SIZE]
if growthInterval == 0:
if curSize == 0:
return 0
else:
return 1
while 1:
time.sleep(growthInterval)
size = os.stat(name)[ST_SIZE]
if size == curSize:
if curSize == 0:
return 0
else:
return 1
curSize = size
# find tif to rgb image convert utility
if 'VISIT_TEST_CONVERT' in os.environ:
imgConverter = os.environ['VISIT_TEST_CONVERT']
elif (os.path.isfile("/usr/bin/convert")):
imgConverter = "/usr/bin/convert"
else:
imgConverter = "convert"
# ----------------------------------------------------------------------------
# Function: SaveFileInfo
#
# Purpose:
# Return a string representing the appropriate extension for the
# given file format and return bools indicating if the format supports
# curves, images and/or geometry.
#
# Programmer: Mark C. Miller
# September 20, 2005
#
# ----------------------------------------------------------------------------
def SaveFileInfo(fmt):
swa = SaveWindowAttributes()
if (fmt == swa.POSTSCRIPT):
return ("ps", 1, 0, 0)
elif (fmt == swa.CURVE):
return ("curve", 1, 0, 0)
elif (fmt == swa.ULTRA):
return ("ultra", 1, 0, 0)
elif (fmt == swa.BMP):
return ("bmp", 0, 1, 0)
elif (fmt == swa.JPEG):
return ("jpeg", 0, 1, 0)
elif (fmt == swa.PNG):
return ("png", 0, 1, 0)
elif (fmt == swa.PPM):
return ("ppm", 0, 1, 0)
elif (fmt == swa.RGB):
return ("rgb", 0, 1, 0)
elif (fmt == swa.TIFF):
return ("tif", 0, 1, 0)
elif (fmt == swa.STL):
return ("stl", 0, 0, 1)
elif (fmt == swa.OBJ):
return ("obj", 0, 0, 1)
elif (fmt == swa.VTK):
return ("vtk", 0, 0, 1)
else:
return ("unknown", 0, 0, 0)
swa=SaveWindowAttributes()
swa.family = 0
AddWindow()
SetActiveWindow(1)
# I=Image, G=Geometry, C=Curve formats
CFormats=[swa.CURVE, swa.POSTSCRIPT, swa.ULTRA]
IFormats=[swa.BMP, swa.JPEG, swa.PNG, swa.PPM, swa.RGB, swa.TIFF]
GFormats=[swa.STL, swa.OBJ, swa.VTK]
a = AnnotationAttributes()
a.userInfoFlag = 0
a.databaseInfoFlag = 0
a.legendInfoFlag = 0
SetAnnotationAttributes(a)
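# ----------------------------------------------------------------------------
# Function: TestSaveFormat
#
# Purpose:
#   Save window 1 in the given format and verify the result by reading the
#   saved file back into window 2: curve formats are re-plotted as Curve plots
#   (PostScript is only checked for existence), image formats are converted to
#   TIFF and re-plotted as Pseudocolor plots, and geometry formats are
#   re-plotted as Mesh plots.
# ----------------------------------------------------------------------------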
def TestSaveFormat(fmt):
SetActiveWindow(1)
mode = ""
result = "Failed\n"
(ext, isC, isI, isG) = SaveFileInfo(fmt)
swatmp = swa
swatmp.format = fmt
swatmp.outputToCurrentDirectory = 1
if isI:
swatmp.fileName = "saveformat_tmp.%s"%ext
else:
swatmp.fileName = "saveformat_tmp"
SetSaveWindowAttributes(swatmp)
try:
SaveWindow()
except:
if TestEnv.params["scalable"]:
if GetLastError() == "You cannot save non-image formats (e.g. ultra, curve, stl, etc.)" \
" from a window that is currently in scalable rendering mode. You" \
" may force scalable rendering to Never but if the resulting data" \
" is too big for the viewer to handle, it will likely crash" \
" VisIt. For 3D formats, try an export database operation instead." :
TestText("saveformat_%s%s"%(mode,ext), "Passed\n")
return
TestText("saveformat_%s%s"%(mode,ext), result)
return
# depending on the type of format this is, try to
# read the file we just created back into VisIt and
# put up a plot in window 2. If that succeeds, we'll
# say this format's save actually worked
if isC:
if FileExists(swatmp.fileName+"."+ext, 1, 0):
if ext == "ps":
result = "Passed\n" # can only test existence for ps
else:
SetActiveWindow(2)
if OpenDatabase(swatmp.fileName+"."+ext):
AddPlot("Curve","curve")
if DrawPlots():
result = "Passed\n"
DeleteAllPlots()
CloseDatabase(swatmp.fileName+"."+ext)
elif isI:
if swatmp.screenCapture == 0:
mode = "offscreen_"
tiffFileName = "%s/saveformat_tmp.tif"%TestEnv.params["run_dir"]
tiffFileExists = 0
imageFileExists = FileExists(swatmp.fileName, 1, 0)
# TODO_WINDOWS ?
if imageFileExists:
os.system("%s %s -compress none %s"%(imgConverter, swatmp.fileName, tiffFileName))
tiffFileExists = FileExists(tiffFileName, 1, 0)
if tiffFileExists:
SetActiveWindow(2)
if OpenDatabase(tiffFileName):
AddPlot("Pseudocolor","red")
if DrawPlots():
result = "Passed\n"
DeleteAllPlots()
CloseDatabase(tiffFileName)
elif isG:
if FileExists(swatmp.fileName+"."+ext, 1, 0):
if ext == "stl":
meshName = "STL_mesh"
elif ext == "obj":
meshName = "OBJMesh"
elif ext == "vtk":
meshName = "mesh"
SetActiveWindow(2)
if OpenDatabase(swatmp.fileName+"."+ext):
AddPlot("Mesh",meshName)
if DrawPlots():
result = "Passed\n"
DeleteAllPlots()
CloseDatabase(swatmp.fileName+"."+ext)
TestText("saveformat_%s%s"%(mode,ext), result)
SetActiveWindow(1)
TestSection("Curve Formats")
OpenDatabase(data_path("curve_test_data","c062.curve"))
AddPlot("Curve", "going_down")
DrawPlots()
for f in CFormats:
TestSaveFormat(f)
TestSection("Image Formats via Screen Capture")
DeleteAllPlots()
CloseDatabase(data_path("curve_test_data","c062.curve"))
OpenDatabase(silo_data_path("multi_rect2d.silo"))
AddPlot("Mesh", "mesh1")
AddPlot("Pseudocolor", "d")
DrawPlots()
slider = CreateAnnotationObject("TimeSlider")
for f in IFormats:
TestSaveFormat(f)
TestSection("Image Formats via Off Screen")
swa.screenCapture = 0
for f in IFormats:
TestSaveFormat(f)
swa.screenCapture = 1
TestSection("Geometry Formats")
DeleteAllPlots()
CloseDatabase(silo_data_path("multi_rect2d.silo"))
OpenDatabase(silo_data_path("globe.silo"))
AddPlot("Pseudocolor", "dx")
DrawPlots()
for f in GFormats:
TestSaveFormat(f)
Exit()
|
the-stack_0_18372 | # -*- coding: utf-8 -*-
"""Tracker module.
This module contains a Kalman filter class as the generic filter, and a tracker
class for the object tracking in image plane, containing Kalman filter
instances, in-track records, and a dictionary for attributes.
"""
from __future__ import division
# package dependency
import numpy as np
from scipy.linalg import inv
from absl import logging
class tracker():
"""Tracker class.
This class works as an entity of a tracked object, containing the filters for
the bounding box on image plan and its depth information, the tracker ID, the
in-track status, and the dictionary for extra attributes which are not parts
of filtering algorithm.
Attributes:
tid (int): A unique ID of a tracker.
f_bbox (kalman_filter): Kalman filter for filtering the bounding box.
f_depth (kalman_filter): Kalman filter for filtering the depth information.
est_dict (dict): Dictionary for tracker attributes.
last_update (int): Frame index of the last update.
in_track (bool): In-track status, false for out-of-track.
"""
def __init__(self, config, tid, bbox, depth, est_dict):
"""__init__ method.
Args:
config (dict): Configuration of tracker.
tid (int): A unique ID of a tracker.
bbox (list): Bounding box in a list as [xmin, ymin, xmax, ymax].
depth (float): Estimated depth.
est_dict (dict): Dictionary for tracker attributes.
"""
assert bbox.shape[0]==4
self.tid = tid
self.f_bbox = kalman_filter(x_pos=np.array(bbox), rounding=True,
state_cov=config['bbox_cov'], meas_cov=config['bbox_cov'])
# note: the state_cov and meas_cov has to be adjusted
# if the pixel2meter_scale changes.
# the best way is to calculate actual variance from the estimator outputs
self.f_depth = kalman_filter(x_pos=np.array([depth]), rounding=False,
state_cov=config['depth_cov'], meas_cov=config['depth_cov'])
self.est_dict = est_dict
self.last_update = 0
self.in_track = False
def predict(self):
"""Predict method. All filters perform predict method."""
b_ = self.f_bbox.predict()
d_ = self.f_depth.predict()
def update(self, time_stamp, bbox, depth, est_dict):
"""Update method.
All filters perform update method, with attributes and time stamp saved.
If the tracker does not receive updates, this method will then not be
called, leaving all these attributes not updated.
Args:
time_stamp (int): Frame index of this update.
bbox (list): Bounding box in a list as [xmin, ymin, xmax, ymax].
depth (float): Estimated depth.
est_dict (dict): Dictionary for tracker attributes.
"""
self.f_bbox.update(bbox)
self.f_depth.update(depth)
self.est_dict = est_dict
self.last_update = time_stamp
def add_attr_to_est_dict(self, key, value):
"""Add an attribute to dictionary `est_dict`.
Args:
key (str): Key of attribute.
value (obj): Value of attribute.
"""
self.est_dict[key] = value
def get_bbox(self):
""" Get bounding box.
Return:
bbox (list): Bounding box in a list as [xmin, ymin, xmax, ymax].
"""
return self.f_bbox.x[:4]
def get_depth(self):
""" Get depth information.
Return:
depth (float): Tracked depth.
"""
return self.f_depth.x[0]
def get_est_dict(self):
""" Get attributes.
Return:
est_dict (dict): Dictionary for tracker attributes.
"""
return self.est_dict
def update_status(self, time_stamp):
"""Status update method.
This method checks the status of this tracker. If this tracker has not been
        updated once within `loose_track_threshold` frames, it will be considered as
        lost with `in_track` labeled `false`. Once labeled lost, it could be
        deleted (or not) depending on the garbage collection implemented outside the
tracker class.
Args:
time_stamp (int): Frame index of this update.
"""
loose_track_threshold = 5
self.in_track = False
if(time_stamp - self.last_update < loose_track_threshold):
self.in_track = True
def get_status(self):
""" Get tracker status.
Return:
in_track (bool): In-track status, false for out-of-track.
"""
return self.in_track
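# Constant-velocity Kalman filter: the state stacks position and velocity, the
# transition matrix F advances position by velocity*dt, the process noise Q
# follows the white-noise-acceleration model, and only position is measured
# (H = [I 0]).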
class kalman_filter():
def __init__(self,
x_pos=np.array([0]),
state_model='const_velo',
state_cov=10.0,
proc_cov=1.0,
meas_model='pos',
meas_cov=1.0,
rounding=False):
logging.debug('Kalman filter initialization with init pos {}'.format(x_pos))
# time step
self.dt = 1
# configuration
self.rounding = rounding
# state vector and state model
x_dim = x_pos.shape[0]
self.x_dim = x_dim
self.x = np.concatenate((x_pos, np.zeros(x_dim))) # position and velocity
self.P = state_cov * np.eye(x_dim*2)
if state_model == 'const_velo':
# x(k) = F x(k-1) + G a(k)
# where a(k) is a random variable with
# Gaussian(0, proc_cov)
# F = [ I I*dt]
# [ 0 I ]
self.F = np.block([
[np.eye(x_dim), np.eye(x_dim)*self.dt],
[np.zeros((x_dim,x_dim)), np.eye(x_dim)]])
# G = (0.5*dt^2 dt)^T
            # Q = (G*G^T)*proc_cov = proc_cov * [I*(dt^4)/4  I*(dt^3)/2]
            #                                    [I*(dt^3)/2  I*(dt^2)  ]
self.Q = proc_cov * np.block([
[np.eye(x_dim)*(self.dt**4)/4, np.eye(x_dim)*(self.dt**3)/2],
[np.eye(x_dim)*(self.dt**3)/2, np.eye(x_dim)*(self.dt**2)]])
else:
            raise ValueError('invalid state model')
# measurement model
if meas_model == 'pos':
# H = [I 0]
self.H = np.block([np.eye(x_dim), np.zeros((x_dim,x_dim))])
self.R = meas_cov * np.eye(x_dim)
else:
            raise ValueError('invalid measurement model')
def round_pos(self):
self.x[:self.x_dim] = np.round(self.x[:self.x_dim])
def predict(self):
# prior prediction
self.x = self.F.dot(self.x)
self.P = self.F.dot(self.P).dot(self.F.T) + self.Q
if self.rounding:
self.round_pos()
return self.x
def update(self, z):
# innovation
y = z - self.H.dot(self.x)
S = self.R + self.H.dot(self.P).dot(self.H.T)
# Kalman gain
K = self.P.dot(self.H.T).dot(inv(S))
# posterior update
self.x += K.dot(y)
if self.rounding:
self.round_pos()
self.P -= K.dot(self.H).dot(self.P)
return self.x, K
# testing code
def test(_):
import random
import matplotlib.pyplot as plt
    # example covariances; the original call omitted the required config argument
    t = tracker(config={'bbox_cov': 10.0, 'depth_cov': 1.0}, tid=0, bbox=np.array([0, 0, 0, 0]), depth=0, est_dict={'label': 0})
x_bx, x_by, x_d = [], [], []
z_bx, z_by, z_d = [], [], []
z_bbox = np.array([2,3,4,5])
z_depth = np.array([3.3])
for i in range(50):
print('time =', i)
t.predict()
x_bx.append(0.5*(t.get_bbox()[0] + t.get_bbox()[2]))
x_by.append(0.5*(t.get_bbox()[1] + t.get_bbox()[3]))
x_d.append(t.get_depth())
        print('prediction:\nbbox={}, depth={}'.format(t.get_bbox(), t.get_depth()))
z_bx.append(0.5*(z_bbox[0] + z_bbox[2]))
z_by.append(0.5*(z_bbox[1] + z_bbox[3]))
z_d.append(z_depth.copy())
        print('observation:\nbbox={}, depth={}'.format(z_bbox, z_depth))
t.update(time_stamp=i, bbox=z_bbox, depth=z_depth, est_dict={'label':0})
print('posterior:\nbbox={}, depth={}'.format(t.get_bbox(), t.get_depth()))
# object move
z_bbox += np.array([20, 20, 40, 40]) # const speed
z_bbox += np.random.randint(low=-10, high=10, size=4) # random walk
z_depth += 0.4 # const speed
z_depth += 0.5*np.random.randn() # random walk
plt.plot(x_bx, x_by, marker='+')
plt.plot(z_bx, z_by, marker='.')
plt.legend(['x', 'z'], loc='upper left')
plt.show()
plt.plot(x_d, marker='+')
plt.plot(z_d, marker='.')
plt.legend(['x', 'z'], loc='upper left')
plt.show()
if __name__ == "__main__":
from absl import app, logging
logging.set_verbosity(logging.DEBUG)
app.run(test)
|
the-stack_0_18374 | from keras.layers import Embedding , Dense , Flatten
from keras.layers.convolutional import Conv1D , MaxPooling1D
from keras.models import Sequential
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
class Embedding_Model:
def __init__(self):
pass
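    # build_model trains a binary text classifier: an Embedding layer feeds a
    # stack of Conv1D/MaxPooling1D blocks that is flattened into dense layers
    # ending in a single sigmoid unit; predictions on X_test are printed after
    # fitting.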
    def build_model(self, X_train, y_train, X_test, y_test, vocab_siz, vector_dim, input_length):
        model = Sequential()
        model.add(Embedding(vocab_siz, vector_dim, input_length=input_length))
        model.add(Conv1D(filters=32, kernel_size=8, activation='relu'))
        model.add(Conv1D(filters=64, kernel_size=8, activation='relu'))
        model.add(MaxPooling1D(6))
        model.add(Conv1D(filters=128, kernel_size=8, activation='relu'))
        model.add(MaxPooling1D(4))
        model.add(Flatten())
        model.add(Dense(128, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
        model.fit(X_train, y_train, validation_split=0.2, batch_size=16, epochs=15, verbose=1)
        # loss, acc = model.evaluate(X_test, y_test)
        print(model.predict(X_test))
|
the-stack_0_18376 | import ctypes
from ctypes import *
from ctypes.wintypes import *
from win32defs import *
class SymOpts(object):
SYMOPT_EXACT_SYMBOLS = 0x00000400
SYMOPT_DEBUG = 0x80000000
SYMOPT_UNDNAME = 0x00000002
windll.kernel32.LoadLibraryA('dbghelp.dll')
dbghelp = windll.dbghelp
class Flag(int):
def __new__(cls, name, value):
return super(Flag, cls).__new__(cls, value)
def __init__(self, name, value):
self.name = name
def __repr__(self):
return "{0}({1:#x})".format(self.name, self)
# Custom __str__ removed for multiple reason
# Main one -> it breaks the json encoding of structure with flags :)
# Moving to a new politic -> if people want the name in a string use {x!r}
# The __str__ of security descriptor & guid will change soon as well :)
# __str__ = __repr__
# Fix pickling with protocol 2
def __getnewargs__(self, *args):
return self.name, int(self)
class FlagMapper(dict):
def __init__(self, *values):
self.update({x:x for x in values})
def __missing__(self, key):
return key
class EnumValue(Flag):
def __new__(cls, enum_name, name, value):
return super(EnumValue, cls).__new__(cls, name, value)
def __init__(self, enum_name, name, value):
self.enum_name = enum_name
self.name = name
def __repr__(self):
return "{0}.{1}({2})".format(self.enum_name, self.name, hex(self))
# Fix pickling with protocol 2
def __getnewargs__(self, *args):
return self.enum_name, self.name, int(self)
class EnumType(DWORD):
values = ()
mapper = {}
@property
def value(self):
raw_value = super_noissue(EnumType, self).value
return self.mapper.get(raw_value, raw_value)
def __repr__(self):
raw_value = super_noissue(EnumType, self).value
if raw_value in self.values:
value = self.value
return "<{0} {1}({2})>".format(type(self).__name__, value.name, hex(raw_value))
return "<{0}({1})>".format(type(self).__name__, hex(self.value))
SymTagNull = EnumValue("_SymTagEnum", "SymTagNull", 0x0)
SymTagExe = EnumValue("_SymTagEnum", "SymTagExe", 0x1)
SymTagCompiland = EnumValue("_SymTagEnum", "SymTagCompiland", 0x2)
SymTagCompilandDetails = EnumValue("_SymTagEnum", "SymTagCompilandDetails", 0x3)
SymTagCompilandEnv = EnumValue("_SymTagEnum", "SymTagCompilandEnv", 0x4)
SymTagFunction = EnumValue("_SymTagEnum", "SymTagFunction", 0x5)
SymTagBlock = EnumValue("_SymTagEnum", "SymTagBlock", 0x6)
SymTagData = EnumValue("_SymTagEnum", "SymTagData", 0x7)
SymTagAnnotation = EnumValue("_SymTagEnum", "SymTagAnnotation", 0x8)
SymTagLabel = EnumValue("_SymTagEnum", "SymTagLabel", 0x9)
SymTagPublicSymbol = EnumValue("_SymTagEnum", "SymTagPublicSymbol", 0xa)
SymTagUDT = EnumValue("_SymTagEnum", "SymTagUDT", 0xb)
SymTagEnum = EnumValue("_SymTagEnum", "SymTagEnum", 0xc)
SymTagFunctionType = EnumValue("_SymTagEnum", "SymTagFunctionType", 0xd)
SymTagPointerType = EnumValue("_SymTagEnum", "SymTagPointerType", 0xe)
SymTagArrayType = EnumValue("_SymTagEnum", "SymTagArrayType", 0xf)
SymTagBaseType = EnumValue("_SymTagEnum", "SymTagBaseType", 0x10)
SymTagTypedef = EnumValue("_SymTagEnum", "SymTagTypedef", 0x11)
SymTagBaseClass = EnumValue("_SymTagEnum", "SymTagBaseClass", 0x12)
SymTagFriend = EnumValue("_SymTagEnum", "SymTagFriend", 0x13)
SymTagFunctionArgType = EnumValue("_SymTagEnum", "SymTagFunctionArgType", 0x14)
SymTagFuncDebugStart = EnumValue("_SymTagEnum", "SymTagFuncDebugStart", 0x15)
SymTagFuncDebugEnd = EnumValue("_SymTagEnum", "SymTagFuncDebugEnd", 0x16)
SymTagUsingNamespace = EnumValue("_SymTagEnum", "SymTagUsingNamespace", 0x17)
SymTagVTableShape = EnumValue("_SymTagEnum", "SymTagVTableShape", 0x18)
SymTagVTable = EnumValue("_SymTagEnum", "SymTagVTable", 0x19)
SymTagCustom = EnumValue("_SymTagEnum", "SymTagCustom", 0x1a)
SymTagThunk = EnumValue("_SymTagEnum", "SymTagThunk", 0x1b)
SymTagCustomType = EnumValue("_SymTagEnum", "SymTagCustomType", 0x1c)
SymTagManagedType = EnumValue("_SymTagEnum", "SymTagManagedType", 0x1d)
SymTagDimension = EnumValue("_SymTagEnum", "SymTagDimension", 0x1e)
class _SymTagEnum(EnumType):
values = [SymTagNull, SymTagExe, SymTagCompiland, SymTagCompilandDetails, SymTagCompilandEnv, SymTagFunction, SymTagBlock, SymTagData, SymTagAnnotation, SymTagLabel, SymTagPublicSymbol, SymTagUDT, SymTagEnum, SymTagFunctionType, SymTagPointerType, SymTagArrayType, SymTagBaseType, SymTagTypedef, SymTagBaseClass, SymTagFriend, SymTagFunctionArgType, SymTagFuncDebugStart, SymTagFuncDebugEnd, SymTagUsingNamespace, SymTagVTableShape, SymTagVTable, SymTagCustom, SymTagThunk, SymTagCustomType, SymTagManagedType, SymTagDimension]
mapper = FlagMapper(*values)
SymTagEnum = _SymTagEnum
class GUID(Structure):
_fields_ = [("Data1", DWORD),
("Data2", WORD),
("Data3", WORD),
("Data4", BYTE * 8)]
class SYMBOL_INFO(Structure):
_fields_ = [
('SizeOfStruct', DWORD),
('TypeIndex', DWORD),
('Reserved', ULONGLONG*2),
('Index', DWORD),
('Size', DWORD),
('ModBase', ULONGLONG),
('Flags', DWORD),
('Value', ULONGLONG),
('Address', ULONGLONG),
('Register', DWORD),
('Scope', DWORD),
('Tag', DWORD),
('NameLen', DWORD),
('MaxNameLen', DWORD),
('Name', c_char*1)
]
PSYMBOL_INFO = POINTER(SYMBOL_INFO)
class IMAGEHLP_MODULE (Structure):
_fields_ = [
("SizeOfStruct", DWORD),
("BaseOfImage", DWORD),
("ImageSize", DWORD),
("TimeDateStamp", DWORD),
("CheckSum", DWORD),
("NumSyms", DWORD),
("SymType", DWORD), # SYM_TYPE
("ModuleName", CHAR * 32),
("ImageName", CHAR * 256),
("LoadedImageName", CHAR * 256),
]
PIMAGEHLP_MODULE = POINTER(IMAGEHLP_MODULE)
class IMAGEHLP_MODULE64 (Structure):
_fields_ = [
("SizeOfStruct", DWORD),
("BaseOfImage", DWORD64),
("ImageSize", DWORD),
("TimeDateStamp", DWORD),
("CheckSum", DWORD),
("NumSyms", DWORD),
("SymType", DWORD), # SYM_TYPE
("ModuleName", CHAR * 32),
("ImageName", CHAR * 256),
("LoadedImageName", CHAR * 256),
("LoadedPdbName", CHAR * 256),
("CVSig", DWORD),
("CVData", CHAR * (MAX_PATH * 3)),
("PdbSig", DWORD),
("PdbSig70", GUID),
("PdbAge", DWORD),
("PdbUnmatched", BOOL),
("DbgUnmatched", BOOL),
("LineNumbers", BOOL),
("GlobalSymbols", BOOL),
("TypeInfo", BOOL),
("SourceIndexed", BOOL),
("Publics", BOOL),
]
PIMAGEHLP_MODULE64 = POINTER(IMAGEHLP_MODULE64)
# typedef struct _IMAGEHLP_STACK_FRAME {
# ULONG64 InstructionOffset;
# ULONG64 ReturnOffset;
# ULONG64 FrameOffset;
# ULONG64 StackOffset;
# ULONG64 BackingStoreOffset;
# ULONG64 FuncTableEntry;
# ULONG64 Params[4];
# ULONG64 Reserved[5];
# BOOL Virtual;
# ULONG Reserved2;
# } IMAGEHLP_STACK_FRAME, *PIMAGEHLP_STACK_FRAME;
class _IMAGEHLP_STACK_FRAME(Structure):
_fields_ = [
("InstructionOffset", ULONG64),
("ReturnOffset", ULONG64),
("FrameOffset", ULONG64),
("StackOffset", ULONG64),
("BackingStoreOffset", ULONG64),
("FuncTableEntry", ULONG64),
("Params", ULONG64 * (4)),
("Reserved", ULONG64 * (5)),
("Virtual", BOOL),
("Reserved2", ULONG),
]
IMAGEHLP_STACK_FRAME = _IMAGEHLP_STACK_FRAME
PIMAGEHLP_STACK_FRAME = POINTER(_IMAGEHLP_STACK_FRAME)
SymInitialize = dbghelp.SymInitialize
SymInitialize.argtypes = [
HANDLE,
PVOID,
c_bool
]
SymCleanup = dbghelp.SymCleanup
SymCleanup.argtypes = [
HANDLE
]
SymGetOptions = dbghelp.SymGetOptions
SymGetOptions.restype = DWORD
SymSetOptions = dbghelp.SymSetOptions
# BOOL IMAGEAPI SymGetModuleInfo(
# HANDLE hProcess,
# DWORD dwAddr,
# PIMAGEHLP_MODULE ModuleInfo
# );
SymGetModuleInfo = dbghelp.SymGetModuleInfo
SymGetModuleInfo.restype = bool
SymGetModuleInfo.argtypes = [
HANDLE,
DWORD,
PIMAGEHLP_MODULE
]
# BOOL IMAGEAPI SymGetModuleInfo64(
# HANDLE hProcess,
# DWORD64 qwAddr,
# PIMAGEHLP_MODULE64 ModuleInfo
# );
SymGetModuleInfo64 = dbghelp.SymGetModuleInfo64
SymGetModuleInfo64.restype = bool
SymGetModuleInfo64.argtypes = [
HANDLE,
ULONGLONG,
PIMAGEHLP_MODULE64
]
SymSetSearchPath = dbghelp.SymSetSearchPath
SymSetSearchPath.argtypes = [
HANDLE,
c_wchar_p
]
# DWORD64 IMAGEAPI SymLoadModule64(
# HANDLE hProcess,
# HANDLE hFile,
# PCSTR ImageName,
# PCSTR ModuleName,
# DWORD64 BaseOfDll,
# DWORD SizeOfDll
# );
SymLoadModule64 = dbghelp.SymLoadModule64
SymLoadModule64.restype = DWORD64
SymLoadModule64.argtypes = [
HANDLE,
HANDLE,
PCSTR,
PCSTR,
DWORD64,
DWORD
]
# DWORD64 IMAGEAPI SymLoadModuleEx(
# HANDLE hProcess,
# HANDLE hFile,
# PCSTR ImageName,
# PCSTR ModuleName,
# DWORD64 BaseOfDll,
# DWORD DllSize,
# PMODLOAD_DATA Data,
# DWORD Flags
# );
SymLoadModuleEx = dbghelp.SymLoadModuleEx
SymLoadModuleEx.restype = PVOID
SymLoadModuleEx.argtypes = [
HANDLE,
HANDLE,
PCSTR,
PCSTR,
PVOID,
DWORD,
PVOID,
DWORD
]
# BOOL IMAGEAPI SymEnumSymbols(
# HANDLE hProcess,
# ULONG64 BaseOfDll,
# PCSTR Mask,
# PSYM_ENUMERATESYMBOLS_CALLBACK EnumSymbolsCallback,
# PVOID UserContext
# );
SymEnumSymbols = dbghelp.SymEnumSymbols
SymEnumSymbols.restype = bool
SymEnumSymbols.argtypes = [
HANDLE,
ULONG64,
PCSTR,
PVOID,
PVOID
]
# BOOL IMAGEAPI SymFromAddr(
# HANDLE hProcess,
# DWORD64 Address,
# PDWORD64 Displacement,
# PSYMBOL_INFO Symbol
# );
SymFromAddr = dbghelp.SymFromAddr
SymFromAddr.restype = bool
SymFromAddr.argtypes = [
HANDLE,
DWORD64,
PDWORD64,
PSYMBOL_INFO
]
# BOOL IMAGEAPI SymSetContext(
# HANDLE hProcess,
# PIMAGEHLP_STACK_FRAME StackFrame,
# PIMAGEHLP_CONTEXT Context
# );
SymSetContext = dbghelp.SymSetContext
SymSetContext.restype = bool
SymSetContext.argtypes = [
HANDLE,
PIMAGEHLP_STACK_FRAME,
PVOID
]
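# Minimal usage sketch (not part of the original module; the handle, module
# path and address below are placeholders): set options, initialize, load the
# target module, then resolve an address with a SYMBOL_INFO buffer that
# reserves extra room for the symbol name.
#
#   hProcess = windll.kernel32.GetCurrentProcess()
#   SymSetOptions(SymGetOptions() | SymOpts.SYMOPT_UNDNAME)
#   SymInitialize(hProcess, None, False)
#   base = SymLoadModuleEx(hProcess, 0, b"C:\\target\\module.dll", None, 0, 0, None, 0)
#   buf = create_string_buffer(sizeof(SYMBOL_INFO) + 255)
#   sym = cast(buf, PSYMBOL_INFO).contents
#   sym.SizeOfStruct = sizeof(SYMBOL_INFO)
#   sym.MaxNameLen = 255
#   displacement = DWORD64(0)
#   if SymFromAddr(hProcess, base + 0x1000, byref(displacement), byref(sym)):
#       print(sym.Name, hex(displacement.value))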
|
the-stack_0_18378 | # Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""\
PyEDDL is a Python wrapper for EDDL, the European Distributed Deep
Learning library.
"""
import os
from setuptools import setup, Extension
import pybind11
from pyeddl.version import VERSION
def to_bool(s):
s = s.lower()
return s != "off" and s != "false"
EXTRA_COMPILE_ARGS = ['-std=c++11', '-fvisibility=hidden']
LIBRARIES = ["eddl"]
if "EDDL_WITH_CUDA" in os.environ:
LIBRARIES.extend(["cudart", "cublas", "curand"])
INCLUDE_DIRS = [
"src",
pybind11.get_include(),
pybind11.get_include(user=True)
]
LIBRARY_DIRS = []
RUNTIME_LIBRARY_DIRS = []
EDDL_DIR = os.getenv("EDDL_DIR")
if EDDL_DIR:
INCLUDE_DIRS.extend([os.path.join(EDDL_DIR, "include")])
LIBRARY_DIRS.extend([os.path.join(EDDL_DIR, "lib")])
RUNTIME_LIBRARY_DIRS.extend([os.path.join(EDDL_DIR, "lib")])
# optional modules, on by default. Set env var to "OFF" or "FALSE" to disable
EDDL_WITH_PROTOBUF = to_bool(os.getenv("EDDL_WITH_PROTOBUF", "ON"))
if EDDL_WITH_PROTOBUF:
EXTRA_COMPILE_ARGS.append('-DEDDL_WITH_PROTOBUF')
ext = Extension(
"pyeddl._core",
sources=["src/_core.cpp"],
include_dirs=INCLUDE_DIRS,
library_dirs=LIBRARY_DIRS,
runtime_library_dirs=RUNTIME_LIBRARY_DIRS,
libraries=LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
)
setup(
name="pyeddl",
version=VERSION,
url="https://github.com/deephealthproject/pyeddl",
description="Python wrapper for EDDL",
long_description=__doc__,
author="Simone Leo",
author_email="<[email protected]>",
license="BSD",
platforms=["Linux"],
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Topic :: Scientific/Engineering",
"Intended Audience :: Science/Research",
],
packages=["pyeddl"],
ext_modules=[ext],
install_requires=["setuptools", "pybind11<2.6", "numpy"],
zip_safe=False,
)
|
the-stack_0_18379 | import random
from collections import Counter
import numpy as np
actions = []
rewards = []
opponent_picks = []
counts = {}
prior = {"n": 1, "h": 1}
def oppo_action(last_actions, my_action):
a1, a2 = last_actions
if a1 == my_action:
return int(a2)
return int(a1)
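# Strategy: keep per-arm counts n (pulls) and h (accumulated reward gains). In
# the opening steps the agent mirrors the opponent's previous pick; after step
# 10 it draws a Beta(n, h) sample per arm (Thompson-sampling style) and picks
# an arm with probability proportional to its sample.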
def multi_armed_bandit_agent(observation, configuration):
global actions, rewards, counts
if len(counts) == 0:
for i in range(configuration.banditCount):
            counts[i] = dict(prior)  # copy the prior so each arm keeps its own statistics
if len(observation.lastActions) > 0:
rewards.append(observation.reward)
opponent_picks.append(oppo_action(observation.lastActions, actions[-1]))
reward_t2 = rewards[-2] if len(rewards) >= 2 else 0
reward_t1 = rewards[-1] if len(rewards) > 0 else 0
counts[actions[-1]] = {
"n": counts[actions[-1]]["n"] + 1,
"h": counts[actions[-1]]["h"] + (reward_t1 - reward_t2)
}
action = random.randrange(configuration.banditCount)
if observation.step > 1:
action = oppo_action(observation.lastActions, actions[-1])
if observation.step > 10:
        # Thompson-sampling-style draw: sample a score per arm from Beta(n, h)
        # using the author's heuristic parameterisation, then pick an arm with
        # probability proportional to its sampled score.
        pvals = np.array([np.random.beta(d['n'], d['h']) for d in counts.values()])
        pvals = pvals/pvals.sum()
        # normalised again inside choice() to guard against floating-point drift
        action = int(np.random.choice([i for i in range(len(counts))], p=pvals/pvals.sum()))
actions.append(action)
    return action
|
the-stack_0_18384 | from setuptools import setup, find_packages
from runpy import run_path
# Get the version from the relevant file
d = run_path('fluidfoam/_version.py')
__version__ = d['__version__']
# Get the development status from the version string
if 'a' in __version__:
devstatus = 'Development Status :: 3 - Alpha'
elif 'b' in __version__:
devstatus = 'Development Status :: 4 - Beta'
else:
devstatus = 'Development Status :: 5 - Production/Stable'
setup(
name="fluidfoam",
version=__version__,
packages=find_packages(exclude=['tutorials']),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=['numpy>=1.11', 'scipy>=0.17',
'matplotlib>=1.5'],
# metadata for upload to PyPI
author = "Cyrille Bonamy",
author_email = "[email protected]",
description = "Openfoam PostProcessing Python Tools",
license = 'GPLv2',
keywords = ["Openfoam", "postprocessing", "CFD"],
url = "http://legi.grenoble-inp.fr", # project home page, if any
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
devstatus,
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
# actually CeCILL License (GPL compatible license for French laws)
#
# Specify the Python versions you support here. In particular,
# ensure that you indicate whether you support Python 2,
# Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
])
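# Hedged usage sketch (the case path and time name are assumptions): once
# installed, OpenFOAM fields can be read with, e.g.,
#
#   from fluidfoam import readscalar, readvector
#   p = readscalar('path/to/case', '100', 'p')
#   U = readvector('path/to/case', '100', 'U')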
|
the-stack_0_18385 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.contrib import slim
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
from tqdm import tqdm
num_classes = 2
img_height, img_width = 32, 32
channel = 3
def Generator(x):
base = 128
x = slim.fully_connected(x, base, activation_fn=tf.nn.leaky_relu, normalizer_fn=lambda x: x, reuse=tf.AUTO_REUSE, scope='g_dense1')
x = slim.fully_connected(x, base * 2, activation_fn=tf.nn.leaky_relu, normalizer_fn=lambda x:x, reuse=tf.AUTO_REUSE, scope='g_dense2')
x = slim.fully_connected(x, base * 4, activation_fn=tf.nn.leaky_relu, normalizer_fn=lambda x:x, reuse=tf.AUTO_REUSE, scope='g_dense3')
x = slim.fully_connected(x, img_height * img_width * channel, normalizer_fn=lambda x:x, reuse=tf.AUTO_REUSE, scope='g_dense4')
x = tf.reshape(x, [-1, img_height, img_width, channel])
x = tf.nn.tanh(x)
return x
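# Generator maps a length-100 noise vector to a (32, 32, 3) image in [-1, 1]
# (tanh output), matching the x / 127.5 - 1 scaling applied in data_load below.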
def Discriminator(x):
base = 64
x = slim.fully_connected(x, base * 2, activation_fn=tf.nn.leaky_relu, reuse=tf.AUTO_REUSE, scope="d_dense1")
x = slim.fully_connected(x, base, activation_fn=tf.nn.leaky_relu, reuse=tf.AUTO_REUSE, scope="d_dense2")
x = slim.flatten(x)
x = slim.fully_connected(x, 1, activation_fn=None, reuse=tf.AUTO_REUSE, scope="d_dense")
return x
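# Discriminator returns raw logits; the sigmoid is applied inside
# tf.nn.sigmoid_cross_entropy_with_logits during training.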
CLS = {'background': [0,0,0],
'akahara': [0,0,128],
'madara': [0,128,0]}
# get train data
def data_load(path, hf=False, vf=False, rot=False):
xs = []
ts = []
paths = []
data_num = 0
for dir_path in glob(path + '/*'):
data_num += len(glob(dir_path + "/*"))
pbar = tqdm(total = data_num)
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
if channel == 1:
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x = x / 127.5 - 1
if channel == 1:
x = x[..., None]
else:
x = x[..., ::-1]
xs.append(x)
for i, cls in enumerate(CLS):
if cls in path:
t = i
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
if rot != False:
angle = 0
scale = 1
while angle < 360:
angle += rot
_h, _w, _c = x.shape
max_side = max(_h, _w)
tmp = np.zeros((max_side, max_side, _c))
tx = int((max_side - _w) / 2)
ty = int((max_side - _h) / 2)
tmp[ty: ty+_h, tx: tx+_w] = x.copy()
M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)
_x = cv2.warpAffine(tmp, M, (max_side, max_side))
_x = _x[tx:tx+_w, ty:ty+_h]
xs.append(_x)
ts.append(t)
paths.append(path)
pbar.update(1)
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=np.int)
#xs = np.transpose(xs, (0,3,1,2))
pbar.close()
return xs, paths
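# Hedged note on the expected layout (inferred from CLS and the glob pattern):
#   ../Dataset/train/images/akahara/*.jpg
#   ../Dataset/train/images/madara/*.jpg
# Labels are matched by substring of the file path and collected into `ts`,
# but only (xs, paths) are returned since the GAN training below is unsupervised.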
# train
def train():
tf.reset_default_graph()
# place holder
X = tf.placeholder(tf.float32, [None, 100])
X2 = tf.placeholder(tf.float32, [None, img_height, img_width, channel])
Y = tf.placeholder(tf.float32, [None, 1])
keep_prob = tf.placeholder(tf.float32)
g_logits = Generator(X)
d_logits = Discriminator(X2)
gan_logits = Discriminator(g_logits)
tvars = tf.trainable_variables()
d_preds = d_logits
D_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits, labels=Y))
#loss = tf.reduce_mean(tf.square(logits - Y))
D_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
D_vars = [var for var in tvars if 'd_' in var.name]
D_train = D_optimizer.minimize(D_loss, var_list=D_vars)
gan_preds = gan_logits
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=gan_logits, labels=Y))
G_optimizer = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
G_vars = [var for var in tvars if 'g_' in var.name]
G_train = G_optimizer.minimize(G_loss, var_list=G_vars)
xs, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=1)
# training
mb = 64
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
#d_losses = [0]
#g_losses = [0]
#ites = [0]
#fig, ax = plt.subplots(1, 1)
#lines, = ax.plot(d_losses, g_losses)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list="0"
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for ite in range(10000):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = xs[mb_ind]
input_noise = np.random.uniform(-1, 1, size=(mb, 100))
g_output = sess.run(g_logits, feed_dict={X: input_noise})
_X = np.concatenate([x, g_output])
_Y = np.array([1] * mb + [0] * mb, dtype=np.float32)
_Y = _Y[..., None]
_, d_loss = sess.run([D_train, D_loss], feed_dict={X2:_X, Y:_Y})
_Y = np.array([1] * mb, dtype=np.float32)
_Y = _Y[..., None]
_, g_loss = sess.run([G_train, G_loss], feed_dict={X:input_noise, Y: _Y})
#d_losses.append(d_loss)
#g_losses.append(g_loss)
#ites.append(ite + 1)
#lines.set_data(ites, d_losses)
#ax.set_xlim((0, ite+2))
#plt.pause(0.001)
if (ite+1) % 100 == 0:
print("iter >>", ite+1, ',G:loss >>', g_loss, ',D:loss >>', d_loss)
saver = tf.train.Saver()
saver.save(sess, './cnn.ckpt')
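# Training alternates the usual two GAN steps: the discriminator sees a
# half-real, half-generated batch labelled 1/0, then the generator is updated
# through the discriminator (held fixed via var_list) with all-ones labels.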
# test
def test():
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 100])
logits = Generator(X)
np.random.seed(100)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list="0"
with tf.Session(config=config) as sess:
saver = tf.train.Saver()
saver.restore(sess, "./cnn.ckpt")
for i in range(3):
input_noise = np.random.uniform(-1, 1, size=(10, 100))
g_output = sess.run(logits, feed_dict={X: input_noise})
g_output = (g_output + 1 ) / 2
for i in range(10):
gen = g_output[i]
plt.subplot(1,10,i+1)
plt.imshow(gen)
plt.axis('off')
plt.show()
def arg_parse():
    parser = argparse.ArgumentParser(description='GAN implemented with TensorFlow')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
|