ext | sha | content
---|---|---|
py
|
1a56e0a38c5633f647ce068399091aca025e1c5a
|
from socket import *
import threading
class RecvThread(threading.Thread):
def __init__(self, s, bufsize):
if not isinstance(s, socket):
raise TypeError
super(RecvThread, self).__init__()
self.s = s
self.bufsize = bufsize
def run(self):
while True:
data = self.s.recv(self.bufsize).decode()
if not data:
break
print(f'\r{data} \n> ', end='')
class SendThread(threading.Thread):
def __init__(self, s):
if not isinstance(s, socket):
raise TypeError
super(SendThread, self).__init__()
self.s = s
def run(self):
while True:
data = input('> ').encode()
self.s.send(data)
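# Illustrative only: a minimal client wiring for the two threads above. The
# original file defines no entry point; host, port and buffer size below are
# hypothetical placeholders for whatever chat/echo server is being used.
if __name__ == '__main__':
    client = socket(AF_INET, SOCK_STREAM)
    client.connect(('127.0.0.1', 9000))
    RecvThread(client, bufsize=1024).start()
    sender = SendThread(client)
    sender.start()
    sender.join()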
|
py
|
1a56e10af70b0ec1235a27fc62226c78b34a39d4
|
from typing import List, Any, Sequence
from .util import MLP, ThreadedIterator, SMALL_NUMBER
import tensorflow as tf
import numpy as np
import time
import pickle
import os
import shutil
class GGNN(object):
@classmethod
def default_params(cls):
return {
'num_epochs': 1,
'patience': 250,
'learning_rate': 0.001,
'clamp_gradient_norm': 1.0,
'out_layer_dropout_keep_prob': 1.0,
'hidden_size': 200,
'num_timesteps': 4,
'use_graph': True,
'task_ids': [0],
'random_seed': 0,
}
def __init__(self, data_training, data_testing, params=None, restore_file=None, freeze_graph_model=False,
log_dir="./logged_models", cleanup=False):
"""
Basic GGNN class that needs to be extended for use.
:param data_training: data set of PIGs for training [list].
:param data_testing: data set of PIGs for validation [list].
:param params: hyperparameters of the model [dict].
:param restore_file: path to a model that should be restored [str].
:param freeze_graph_model: do not train the parameters of the graph model (only the output layers are updated) [bool].
:param log_dir: directory where the model is stored [str].
:param cleanup: clean directory, where the model is stored, before storing it [bool].
"""
# Collect parameters
store_params = params
self.params = self.default_params()
if store_params is not None:
self.params.update(store_params)
# Load data
self.max_num_vertices = 0
self.num_edge_types = 0
self.annotation_size = 0
self.train_data = self.load_data(data_training, is_training_data=True)
self.valid_data = self.load_data(data_testing, is_training_data=False)
self.freeze_graph_model = freeze_graph_model
self.restore = restore_file
# Save best models/clean up previous models
if cleanup:
shutil.rmtree(log_dir, ignore_errors=True)
self.log_dir = log_dir
os.makedirs(log_dir, exist_ok=True)
# Path to best model
self.best_model_file = os.path.join(log_dir,
"%s_best_model.pickle" % "_".join([time.strftime("%Y-%m-%d-%H-%M")]))
# Build the actual GGNN model
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph, config=config)
with self.graph.as_default():
tf.set_random_seed(self.params['random_seed'])
self.placeholders = {}
self.weights = {}
self.ops = {}
self.make_model()
self.make_train_step()
# Restore/initialize variables
if restore_file is not None:
self.restore_model(restore_file)
else:
self.initialize_model()
@staticmethod
def graph_string_to_array(graph_string: str) -> List[List[int]]:
"""
Converts a graph given as a string into a list of integer lists.
:param graph_string: graph as string [str].
:return: graph as a list of integer lists [list].
"""
return [[int(v) for v in s.split(' ')]
for s in graph_string.split('\n')]
def load_data(self, data, is_training_data: bool):
"""
Loads data and updates dataset-level statistics (max. number of vertices, number of edge types, annotation size).
:param data: list of graphs [list]
A graph = {targets, graph, node_features}
:param is_training_data: boolean flag if data is for training or not [bool]
:return: raw process graphs [list]
A raw process graph = {adjacency_lists [dict], num_incoming_edge per_type [dict],
init [list], labels [list]}
"""
num_fwd_edge_types = 0
for g in data:
self.max_num_vertices = max(self.max_num_vertices, max([v for e in g['graph'] for v in [e[0], e[2]]]))
num_fwd_edge_types = max(num_fwd_edge_types, max([e[1] for e in g['graph']]))
self.num_edge_types = max(self.num_edge_types, num_fwd_edge_types)
self.annotation_size = max(self.annotation_size, len(data[0]["node_features"][0]))
return self.process_raw_graphs(data, is_training_data)
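# Expected input format (sketch, inferred from the bookkeeping above): each
# element of `data` is a dict such as
#     {"targets": [[0.0]],
#      "graph": [[src_node, edge_type, dst_node], ...],
#      "node_features": [[...annotation vector per node...], ...]}
# where all annotation vectors share the same length (the annotation_size
# computed above).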
def process_raw_graphs(self, raw_data: Sequence[Any], is_training_data: bool) -> Any:
raise Exception("Models have to implement process_raw_graphs!")
def make_model(self):
"""
Makes the GGNN model.
:return: none.
"""
# Create placeholders for the GGNN model
self.placeholders['target_values'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_values')
self.placeholders['target_mask'] = tf.placeholder(tf.float32, [len(self.params['task_ids']), None],
name='target_mask')
self.placeholders['num_graphs'] = tf.placeholder(tf.int32, [], name='num_graphs')
self.placeholders['out_layer_dropout_keep_prob'] = tf.placeholder(tf.float32, [],
name='out_layer_dropout_keep_prob')
# Start message passing phase (i.e. update of node representations)
with tf.variable_scope("graph_mode"):
self.prepare_specific_graph_model()
if self.params['use_graph']:
self.ops['final_node_representations'] = self.compute_final_node_representations()
else:
self.ops['final_node_representations'] = tf.zeros_like(self.placeholders['initial_node_representation'])
# Start readout phase (i.e. mapping of node representations to outputs)
self.ops['losses'] = []
for (internal_id, task_id) in enumerate(self.params['task_ids']):
with tf.variable_scope("out_layer_task%i" % task_id):
with tf.variable_scope("regression_gate"):
self.weights['regression_gate_task%i' % task_id] = MLP(2 * self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
with tf.variable_scope("regression"):
self.weights['regression_transform_task%i' % task_id] = MLP(self.params['hidden_size'], 1, [],
self.placeholders[
'out_layer_dropout_keep_prob'])
# Computes the output of the GGNN model
computed_values = self.gated_regression(self.ops['final_node_representations'],
self.weights['regression_gate_task%i' % task_id],
self.weights['regression_transform_task%i' % task_id])
# Computes the difference
diff = self.placeholders['target_values'][internal_id, :] - computed_values
# Ignore comparisons for which no target is given
task_target_mask = self.placeholders['target_mask'][internal_id, :]
task_target_num = tf.reduce_sum(task_target_mask) + SMALL_NUMBER
diff = diff * task_target_mask # Mask out unused values
self.ops['accuracy_task%i' % task_id] = tf.reduce_sum(tf.cast(tf.equal(tf.round(computed_values),
self.placeholders[
'target_values'][internal_id,
:]), tf.float32))
# Calculate loss (here, normalised mean squared error)
task_loss = tf.reduce_sum(tf.square(diff)) / task_target_num
# Normalise loss
task_loss = task_loss * (1.0 / (self.params.get('task_sample_ratios', {}).get(task_id) or 1.0))
self.ops['losses'].append(task_loss)
self.ops['loss'] = tf.reduce_sum(self.ops['losses'])
def make_train_step(self):
"""
Builds the training step (optimizer, gradient clipping and the apply-gradients op).
:return: none.
"""
trainable_vars = self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if self.freeze_graph_model:
graph_vars = set(self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="graph_model"))
filtered_vars = []
for var in trainable_vars:
if var not in graph_vars:
filtered_vars.append(var)
else:
print("Freezing weights of variable %s." % var.name)
trainable_vars = filtered_vars
optimizer = tf.train.AdadeltaOptimizer(1.0)
grads_and_vars = optimizer.compute_gradients(self.ops['loss'], var_list=trainable_vars)
clipped_grads = []
for grad, var in grads_and_vars:
if grad is not None:
clipped_grads.append((tf.clip_by_norm(grad, self.params['clamp_gradient_norm']), var))
else:
clipped_grads.append((grad, var))
self.ops['train_step'] = optimizer.apply_gradients(clipped_grads)
self.sess.run(tf.local_variables_initializer())
def gated_regression(self, last_h, regression_gate, regression_transform):
raise Exception("Models have to implement gated_regression!")
def prepare_specific_graph_model(self) -> None:
raise Exception("Models have to implement prepare_specific_graph_model!")
def compute_final_node_representations(self) -> tf.Tensor:
raise Exception("Models have to implement compute_final_node_representations!")
def make_minibatch_iterator(self, data: Any, is_training: bool):
raise Exception("Models have to implement make_minibatch_iterator!")
def run_epoch(self, data, is_training: bool):
"""
Performs an epoch (i.e. learning iteration).
:param data: set of graphs [list].
:param is_training: boolean flag if data is for training or not [bool].
:return: loss [float], accuracies [np.ndarray], error_ratios [np.ndarray], instance_per_sec [float].
"""
loss = 0
accuracies = []
accuracy_ops = [self.ops['accuracy_task%i' % task_id] for task_id in self.params['task_ids']]
start_time = time.time()
processed_graphs = 0
batch_iterator = ThreadedIterator(self.make_minibatch_iterator(data, is_training), max_queue_size=5)
for step, batch_data in enumerate(batch_iterator):
num_graphs = batch_data[self.placeholders['num_graphs']]
processed_graphs += num_graphs
if is_training:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = self.params[
'out_layer_dropout_keep_prob']
fetch_list = [self.ops['loss'], accuracy_ops, self.ops['train_step']]
else:
batch_data[self.placeholders['out_layer_dropout_keep_prob']] = 1.0
fetch_list = [self.ops['loss'], accuracy_ops]
result = self.sess.run(fetch_list, feed_dict=batch_data)
(batch_loss, batch_accuracies) = (result[0], result[1])
loss += batch_loss * num_graphs
accuracies.append(np.array(batch_accuracies))
accuracies = np.sum(accuracies, axis=0) / processed_graphs
loss = loss / processed_graphs
error_ratios = 1 - accuracies
instance_per_sec = processed_graphs / (time.time() - start_time)
return loss, accuracies, error_ratios, instance_per_sec
def train(self):
"""
Train the GGNN model.
:return: none.
"""
with self.graph.as_default():
if self.restore is not None:
# Resume training: compute the baseline validation accuracy of the restored model
_, valid_accs, _, _ = self.run_epoch(self.valid_data, False)
best_val_acc = np.sum(valid_accs)
best_val_acc_epoch = 0
print("\r\x1b[KResumed operation, initial cum. val. acc: %.5f" % best_val_acc)
else:
(best_val_acc, best_val_acc_epoch) = (0, 0)
for epoch in range(1, self.params['num_epochs'] + 1):
print("== Epoch %i" % epoch)
# Epoch train
train_loss, train_acc, train_errs, train_speed = self.run_epoch(self.train_data, True)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], train_acc)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], train_errs)])
print("\r\x1b[K Train: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (train_loss,
accs_str,
errs_str,
train_speed))
# Epoch validation
valid_loss, valid_accs, valid_errs, valid_speed = self.run_epoch(self.valid_data, False)
accs_str = " ".join(["%i:%.5f" % (id, acc) for (id, acc) in zip(self.params['task_ids'], valid_accs)])
errs_str = " ".join(["%i:%.5f" % (id, err) for (id, err) in zip(self.params['task_ids'], valid_errs)])
print("\r\x1b[K Valid: loss: %.5f | acc: %s | error_ratio: %s | instances/sec: %.2f" % (valid_loss,
accs_str,
errs_str,
valid_speed))
val_acc = np.sum(valid_accs) # type: float
if val_acc > best_val_acc:
# Save best model to self.best_model_file
self.save_model(self.best_model_file)
print("LOG: (Best epoch so far, cum. val. acc decreased to %.5f from %.5f. Saving to '%s')" %
(val_acc, best_val_acc, self.best_model_file))
best_val_acc = val_acc
best_val_acc_epoch = epoch
elif epoch - best_val_acc_epoch >= self.params['patience']:
print("LOG: Stopping training after %i epochs without improvement on validation accuracy." %
self.params['patience'])
break
def save_model(self, model_path: str) -> None:
"""
Saves the GGNN model.
:param model_path: path of GGNN model [str].
:return: none.
"""
weights_save = {}
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
assert variable.name not in weights_save
weights_save[variable.name] = self.sess.run(variable)
model_save = {"params": self.params, "weights": weights_save}
with open(model_path, 'wb') as out_file:
pickle.dump(model_save, out_file, pickle.HIGHEST_PROTOCOL)
def initialize_model(self) -> None:
"""
Initialises the GGNN model.
:return: none.
"""
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
self.sess.run(init_op)
def restore_model(self, path: str) -> None:
"""
Restores a GGNN model.
:param path: path of model [str]
:return: none.
"""
print("Restoring weights from file %s." % path)
with open(path, 'rb') as rest_file:
data_to_load = pickle.load(rest_file)
# Assert that we got the same model configuration
assert len(self.params) == len(data_to_load['params'])
for (par, par_value) in self.params.items():
# Different task_ids possible
if par not in ['task_ids', 'num_epochs']:
assert par_value == data_to_load['params'][par]
variables_to_initialize = []
with tf.name_scope("restore"):
restore_ops = []
used_vars = set()
for variable in self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
used_vars.add(variable.name)
if variable.name in data_to_load['weights']:
restore_ops.append(variable.assign(data_to_load['weights'][variable.name]))
else:
print('Freshly initializing %s since no saved value was found.' % variable.name)
variables_to_initialize.append(variable)
for var_name in data_to_load['weights']:
if var_name not in used_vars:
print('Saved weights for %s not used by model.' % var_name)
restore_ops.append(tf.variables_initializer(variables_to_initialize))
self.sess.run(restore_ops)
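# Usage sketch (hypothetical): GGNN is abstract, so a concrete subclass has to
# implement the five methods above that raise ("process_raw_graphs",
# "prepare_specific_graph_model", "compute_final_node_representations",
# "gated_regression", "make_minibatch_iterator"). Training then looks like:
#
#     class MyGGNN(GGNN):
#         ...  # implement the five hooks
#
#     model = MyGGNN(data_training=train_graphs, data_testing=valid_graphs,
#                    params={'num_epochs': 100, 'hidden_size': 100})
#     model.train()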
|
py
|
1a56e16171232f25a1cbd3f94b5df62754163da9
|
class StatefulObjectBase:
stateList = []
stateMachine = None
def ChangeState(self, state):
if self.stateMachine is None:
return
self.stateMachine.ChangeState(self.stateList[state])
def IsCurrentState(self, state):
if self.stateMachine is None:
return None
return self.stateMachine.currentState is self.stateList[state]
def Update(self):
if self.stateMachine is not None:
self.stateMachine.Update()
|
py
|
1a56e1a46fe3a4111fcf66467f35d7a68469a300
|
import os
import logging
import boto3
from botocore.exceptions import ClientError
from datetime import datetime as dt
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
TABLE_NAME = "aqa_scores"
VIDEO_NAME_KEY = 'videoName'
VIDEO_SCORE_KEY = 'videoScore'
def handler(event, context):
try:
logger.info(event)
raw_query_str = event["rawQueryString"]
if "=" in raw_query_str:
video_key = raw_query_str.split("=")[-1]
else:
logger.error("Something wrong with the HTTP request, no valid query string available.")
return None
# Get region name associated with this lambda function
region = os.environ["AWS_REGION"]
video_score = fetch_db_result(region, video_key)
return video_score
except Exception as e:
logger.exception(e)
raise e
def fetch_db_result(region_name, video_key):
"""Fetch item from DynamoDB of a certain video key
:param region_name: string
:param video_key: string
:return: video_score of DB item with the video_key. If not present, returns None.
"""
db_client = boto3.client("dynamodb", region_name=region_name)
logger.info(f"Getting score for [{video_key}] from table:{TABLE_NAME}...")
try:
response = db_client.get_item(
TableName=TABLE_NAME,
Key={VIDEO_NAME_KEY: {"S": video_key}},
ProjectionExpression=VIDEO_SCORE_KEY,
)
logger.info(response)
if "Item" in response:
score = response["Item"]["videoScore"]["N"]
logger.info(f"Score for video [{video_key}]: {score}")
else:
score = None
logger.info(f"Score for video [{video_key}] is not available")
return score
except ClientError as e:
logger.exception(e)
return None
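# Local smoke-test sketch (hypothetical values): the HTTP API passes a query
# string such as "videoName=sample.mp4" and only the part after "=" is used as
# the DynamoDB key. Running this for real needs AWS credentials, AWS_REGION in
# the environment and an existing "aqa_scores" table.
#
#     os.environ.setdefault("AWS_REGION", "us-east-1")
#     print(handler({"rawQueryString": "videoName=sample.mp4"}, context=None))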
|
py
|
1a56e274f9e44c1bc1f8ba8ab46cb83d0f288ea5
|
import ast
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Callable, Dict, List, Optional, Sequence, TypeVar, Union
from .logging import log
from .utils import fmt_path, str_path
class ArrowHead(Enum):
NORMAL = 0
SEQUENCE = 1
class Ignore:
pass
class Empty:
pass
RightSide = Union[str, Ignore, Empty]
@dataclass
class Transformed:
path: PurePath
class Ignored:
pass
TransformResult = Optional[Union[Transformed, Ignored]]
@dataclass
class Rule:
left: str
left_index: int
name: str
head: ArrowHead
right: RightSide
right_index: int
def right_result(self, path: PurePath) -> Union[str, Transformed, Ignored]:
if isinstance(self.right, str):
return self.right
elif isinstance(self.right, Ignore):
return Ignored()
elif isinstance(self.right, Empty):
return Transformed(path)
else:
raise RuntimeError(f"Right side has invalid type {type(self.right)}")
class Transformation(ABC):
def __init__(self, rule: Rule):
self.rule = rule
@abstractmethod
def transform(self, path: PurePath) -> TransformResult:
pass
class ExactTf(Transformation):
def transform(self, path: PurePath) -> TransformResult:
if path != PurePath(self.rule.left):
return None
right = self.rule.right_result(path)
if not isinstance(right, str):
return right
return Transformed(PurePath(right))
class ExactReTf(Transformation):
def transform(self, path: PurePath) -> TransformResult:
match = re.fullmatch(self.rule.left, str_path(path))
if not match:
return None
right = self.rule.right_result(path)
if not isinstance(right, str):
return right
# For some reason, mypy thinks that "groups" has type List[str]. But
# since elements of "match.groups()" can be None, mypy is wrong.
groups: Sequence[Optional[str]] = [match[0]] + list(match.groups())
locals_dir: Dict[str, Union[str, int, float]] = {}
for i, group in enumerate(groups):
if group is None:
continue
locals_dir[f"g{i}"] = group
try:
locals_dir[f"i{i}"] = int(group)
except ValueError:
pass
try:
locals_dir[f"f{i}"] = float(group)
except ValueError:
pass
result = eval(f"f{right!r}", {}, locals_dir)
return Transformed(PurePath(result))
class RenamingParentsTf(Transformation):
def __init__(self, sub_tf: Transformation):
super().__init__(sub_tf.rule)
self.sub_tf = sub_tf
def transform(self, path: PurePath) -> TransformResult:
for i in range(len(path.parts), -1, -1):
parent = PurePath(*path.parts[:i])
child = PurePath(*path.parts[i:])
transformed = self.sub_tf.transform(parent)
if not transformed:
continue
elif isinstance(transformed, Transformed):
return Transformed(transformed.path / child)
elif isinstance(transformed, Ignored):
return transformed
else:
raise RuntimeError(f"Invalid transform result of type {type(transformed)}: {transformed}")
return None
class RenamingPartsTf(Transformation):
def __init__(self, sub_tf: Transformation):
super().__init__(sub_tf.rule)
self.sub_tf = sub_tf
def transform(self, path: PurePath) -> TransformResult:
result = PurePath()
any_part_matched = False
for part in path.parts:
transformed = self.sub_tf.transform(PurePath(part))
if not transformed:
result /= part
elif isinstance(transformed, Transformed):
result /= transformed.path
any_part_matched = True
elif isinstance(transformed, Ignored):
return transformed
else:
raise RuntimeError(f"Invalid transform result of type {type(transformed)}: {transformed}")
if any_part_matched:
return Transformed(result)
else:
return None
class RuleParseError(Exception):
def __init__(self, line: "Line", reason: str):
super().__init__(f"Error in rule on line {line.line_nr}, column {line.index}: {reason}")
self.line = line
self.reason = reason
def pretty_print(self) -> None:
log.error(f"Error parsing rule on line {self.line.line_nr}:")
log.error_contd(self.line.line)
spaces = " " * self.line.index
log.error_contd(f"{spaces}^--- {self.reason}")
T = TypeVar("T")
class Line:
def __init__(self, line: str, line_nr: int):
self._line = line
self._line_nr = line_nr
self._index = 0
@property
def line(self) -> str:
return self._line
@property
def line_nr(self) -> int:
return self._line_nr
@property
def index(self) -> int:
return self._index
@index.setter
def index(self, index: int) -> None:
self._index = index
@property
def rest(self) -> str:
return self.line[self.index:]
def peek(self, amount: int = 1) -> str:
return self.rest[:amount]
def take(self, amount: int = 1) -> str:
string = self.peek(amount)
self.index += len(string)
return string
def expect(self, string: str) -> str:
if self.peek(len(string)) == string:
return self.take(len(string))
else:
raise RuleParseError(self, f"Expected {string!r}")
def expect_with(self, string: str, value: T) -> T:
self.expect(string)
return value
def one_of(self, parsers: List[Callable[[], T]], description: str) -> T:
for parser in parsers:
index = self.index
try:
return parser()
except RuleParseError:
self.index = index
raise RuleParseError(self, description)
# RULE = LEFT SPACE '-' NAME '-' HEAD (SPACE RIGHT)?
# SPACE = ' '+
# NAME = '' | 'exact' | 'name' | 're' | 'exact-re' | 'name-re'
# HEAD = '>' | '>>'
# LEFT = STR | QUOTED_STR
# RIGHT = STR | QUOTED_STR | '!'
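# Examples that fit this grammar (illustrative, not taken from a real config):
#   foo --> bar                  empty arrow name: rename "foo" (and its children) to "bar"
#   "a b" -name-> c              rename every path segment "a b" to "c"
#   (?i)readme\.md -exact-re-> README.md   regex applied to the whole path
#   junk --> !                   matching paths are ignored
#   old -->> new                 ">>" lets later rules keep transforming the result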
def parse_zero_or_more_spaces(line: Line) -> None:
while line.peek() == " ":
line.take()
def parse_one_or_more_spaces(line: Line) -> None:
line.expect(" ")
parse_zero_or_more_spaces(line)
def parse_str(line: Line) -> str:
result = []
while c := line.peek():
if c == " ":
break
else:
line.take()
result.append(c)
if result:
return "".join(result)
else:
raise RuleParseError(line, "Expected non-space character")
QUOTATION_MARKS = {'"', "'"}
def parse_quoted_str(line: Line) -> str:
escaped = False
# Points to first character of string literal
start_index = line.index
quotation_mark = line.peek()
if quotation_mark not in QUOTATION_MARKS:
raise RuleParseError(line, "Expected quotation mark")
line.take()
while c := line.peek():
if escaped:
escaped = False
line.take()
elif c == quotation_mark:
line.take()
stop_index = line.index
literal = line.line[start_index:stop_index]
try:
return ast.literal_eval(literal)
except SyntaxError as e:
line.index = start_index
raise RuleParseError(line, str(e)) from e
elif c == "\\":
escaped = True
line.take()
else:
line.take()
raise RuleParseError(line, "Expected end of string literal")
def parse_left(line: Line) -> str:
if line.peek() in QUOTATION_MARKS:
return parse_quoted_str(line)
else:
return parse_str(line)
def parse_right(line: Line) -> Union[str, Ignore]:
c = line.peek()
if c in QUOTATION_MARKS:
return parse_quoted_str(line)
else:
string = parse_str(line)
if string == "!":
return Ignore()
return string
def parse_arrow_name(line: Line) -> str:
return line.one_of([
lambda: line.expect("exact-re"),
lambda: line.expect("exact"),
lambda: line.expect("name-re"),
lambda: line.expect("name"),
lambda: line.expect("re"),
lambda: line.expect(""),
], "Expected arrow name")
def parse_arrow_head(line: Line) -> ArrowHead:
return line.one_of([
lambda: line.expect_with(">>", ArrowHead.SEQUENCE),
lambda: line.expect_with(">", ArrowHead.NORMAL),
], "Expected arrow head")
def parse_eol(line: Line) -> None:
if line.peek():
raise RuleParseError(line, "Expected end of line")
def parse_rule(line: Line) -> Rule:
parse_zero_or_more_spaces(line)
left_index = line.index
left = parse_left(line)
parse_one_or_more_spaces(line)
line.expect("-")
name = parse_arrow_name(line)
line.expect("-")
head = parse_arrow_head(line)
right_index = line.index
right: RightSide
try:
parse_zero_or_more_spaces(line)
parse_eol(line)
right = Empty()
except RuleParseError:
line.index = right_index
parse_one_or_more_spaces(line)
right = parse_right(line)
parse_eol(line)
return Rule(left, left_index, name, head, right, right_index)
def parse_transformation(line: Line) -> Transformation:
rule = parse_rule(line)
if rule.name == "":
return RenamingParentsTf(ExactTf(rule))
elif rule.name == "exact":
return ExactTf(rule)
elif rule.name == "name":
if len(PurePath(rule.left).parts) > 1:
line.index = rule.left_index
raise RuleParseError(line, "Expected name, not multiple segments")
return RenamingPartsTf(ExactTf(rule))
elif rule.name == "re":
return RenamingParentsTf(ExactReTf(rule))
elif rule.name == "exact-re":
return ExactReTf(rule)
elif rule.name == "name-re":
return RenamingPartsTf(ExactReTf(rule))
else:
raise RuntimeError(f"Invalid arrow name {rule.name!r}")
class Transformer:
def __init__(self, rules: str):
"""
May throw a RuleParseError.
"""
self._tfs = []
for i, line in enumerate(rules.split("\n")):
line = line.strip()
if line:
tf = parse_transformation(Line(line, i))
self._tfs.append((line, tf))
def transform(self, path: PurePath) -> Optional[PurePath]:
for i, (line, tf) in enumerate(self._tfs):
log.explain(f"Testing rule {i+1}: {line}")
try:
result = tf.transform(path)
except Exception as e:
log.warn(f"Error while testing rule {i+1}: {line}")
log.warn_contd(str(e))
continue
if not result:
continue
if isinstance(result, Ignored):
log.explain("Match found, path ignored")
return None
if tf.rule.head == ArrowHead.NORMAL:
log.explain(f"Match found, transformed path to {fmt_path(result.path)}")
path = result.path
break
elif tf.rule.head == ArrowHead.SEQUENCE:
log.explain(f"Match found, updated path to {fmt_path(result.path)}")
path = result.path
else:
raise RuntimeError(f"Invalid transform result of type {type(result)}: {result}")
log.explain(f"Final result: {fmt_path(path)}")
return path
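# Usage sketch (hypothetical rule set and paths):
#
#     transformer = Transformer("foo --> bar\njunk --> !")
#     transformer.transform(PurePath("foo/baz.txt"))   # -> PurePath("bar/baz.txt")
#     transformer.transform(PurePath("junk/baz.txt"))  # -> None (path is ignored)
#     transformer.transform(PurePath("other.txt"))     # -> PurePath("other.txt"), no rule matched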
|
py
|
1a56e34611b114e7184d5659e1aa728eb7e88a52
|
"""Basic tests for quadrature GPy wrappers."""
import GPy
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
from emukit.model_wrappers.gpy_quadrature_wrappers import (
BaseGaussianProcessGPy,
BrownianGPy,
ProductBrownianGPy,
ProductMatern32GPy,
ProductMatern52GPy,
RBFGPy,
create_emukit_model_from_gpy_model,
)
from emukit.quadrature.kernels import (
QuadratureBrownianLebesgueMeasure,
QuadratureProductBrownianLebesgueMeasure,
QuadratureProductMatern32LebesgueMeasure,
QuadratureProductMatern52LebesgueMeasure,
QuadratureRBFGaussianMeasure,
QuadratureRBFLebesgueMeasure,
)
from emukit.quadrature.measures import GaussianMeasure, LebesgueMeasure
def get_prod_kernel(kernel_type, n_dim):
k = kernel_type(input_dim=1, active_dims=[0])
for i in range(1, n_dim):
k = k * kernel_type(input_dim=1, active_dims=[i])
return k
def data(n_dim: int):
return np.ones([3, n_dim]), np.ones([3, 1])
def integral_bounds(n_dim: int):
return n_dim * [(0, 1)]
def measure_lebesgue(n_dim: int):
return LebesgueMeasure.from_bounds(bounds=n_dim * [(0, 1)])
def measure_gaussian(n_dim: int):
return GaussianMeasure(mean=np.ones(n_dim), variance=1.0)
# === dimension fixtures start here
@pytest.fixture
def dim2():
return 2
@pytest.fixture
def dim1():
return 1
# === 1D GPy kernel fixtures start here
@pytest.fixture
def gpy_brownian(dim1):
kernel_type = GPy.kern.Brownian
return kernel_type(input_dim=dim1), kernel_type, False
@pytest.fixture
def gpy_matern32(dim1):
kernel_type = GPy.kern.Matern32
return kernel_type(input_dim=dim1), kernel_type, False
@pytest.fixture
def gpy_matern52(dim1):
kernel_type = GPy.kern.Matern52
return kernel_type(input_dim=dim1), kernel_type, False
# === 2D GPy kernel fixtures start here
@pytest.fixture
def gpy_rbf(dim2):
kernel_type = GPy.kern.RBF
return kernel_type(input_dim=dim2), kernel_type, False
@pytest.fixture
def gpy_prodbrownian(dim2):
kernel_type = GPy.kern.Brownian
return get_prod_kernel(kernel_type, dim2), kernel_type, True
@pytest.fixture
def gpy_prodmatern32(dim2):
kernel_type = GPy.kern.Matern32
return get_prod_kernel(kernel_type, dim2), kernel_type, True
@pytest.fixture
def gpy_prodmatern52(dim2):
kernel_type = GPy.kern.Matern52
return get_prod_kernel(kernel_type, dim2), kernel_type, True
def get_wrapper_dict(n_dim, measure, gpy_kern, gpy_kernel_wrapper_type, emukit_qkernel_type):
gpy_kernel, gpy_kernel_type, is_prod = gpy_kern
return {
"data": data(n_dim),
"measure": measure(n_dim),
"gpy_kernel": gpy_kernel,
"gpy_kernel_type": gpy_kernel_type,
"is_prod": is_prod,
"gpy_kernel_wrapper_type": gpy_kernel_wrapper_type,
"emukit_qkernel_type": emukit_qkernel_type,
}
# === RBF wrapper test cases
@pytest.fixture
def wrapper_rbf_1(dim2, gpy_rbf):
return get_wrapper_dict(dim2, measure_lebesgue, gpy_rbf, RBFGPy, QuadratureRBFLebesgueMeasure)
@pytest.fixture
def wrapper_rbf_2(dim2, gpy_rbf):
return get_wrapper_dict(dim2, measure_gaussian, gpy_rbf, RBFGPy, QuadratureRBFGaussianMeasure)
# === (product) Brownian wrapper test cases
@pytest.fixture
def wrapper_brownian_1(dim1, gpy_brownian):
return get_wrapper_dict(dim1, measure_lebesgue, gpy_brownian, BrownianGPy, QuadratureBrownianLebesgueMeasure)
@pytest.fixture
def wrapper_brownian_2(dim2, gpy_prodbrownian):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodbrownian, ProductBrownianGPy, QuadratureProductBrownianLebesgueMeasure
)
# === Product Matern32 wrapper test cases
@pytest.fixture
def wrapper_matern32_1(dim2, gpy_prodmatern32):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodmatern32, ProductMatern32GPy, QuadratureProductMatern32LebesgueMeasure
)
@pytest.fixture
def wrapper_matern32_2(dim1, gpy_matern32):
return get_wrapper_dict(
dim1, measure_lebesgue, gpy_matern32, ProductMatern32GPy, QuadratureProductMatern32LebesgueMeasure
)
# === Product Matern52 wrapper test cases
@pytest.fixture
def wrapper_matern52_1(dim2, gpy_prodmatern52):
return get_wrapper_dict(
dim2, measure_lebesgue, gpy_prodmatern52, ProductMatern52GPy, QuadratureProductMatern52LebesgueMeasure
)
@pytest.fixture
def wrapper_matern52_2(dim1, gpy_matern52):
return get_wrapper_dict(
dim1, measure_lebesgue, gpy_matern52, ProductMatern52GPy, QuadratureProductMatern52LebesgueMeasure
)
gpy_test_list = [
lazy_fixture("wrapper_rbf_1"),
lazy_fixture("wrapper_rbf_2"),
lazy_fixture("wrapper_brownian_1"),
lazy_fixture("wrapper_brownian_2"),
lazy_fixture("wrapper_matern32_1"),
lazy_fixture("wrapper_matern32_2"),
lazy_fixture("wrapper_matern52_1"),
lazy_fixture("wrapper_matern52_2"),
]
@pytest.mark.parametrize("wrapper", gpy_test_list)
def test_create_emukit_model_from_gpy_model_types(wrapper):
gpy_model = GPy.models.GPRegression(kernel=wrapper["gpy_kernel"], X=wrapper["data"][0], Y=wrapper["data"][1])
emukit_gp = create_emukit_model_from_gpy_model(gpy_model=gpy_model, measure=wrapper["measure"])
assert isinstance(emukit_gp.kern, wrapper["emukit_qkernel_type"])
assert isinstance(emukit_gp.kern.kern, wrapper["gpy_kernel_wrapper_type"])
# product kernel
if wrapper["is_prod"]:
assert isinstance(wrapper["gpy_kernel"], GPy.kern.Prod)
for k in wrapper["gpy_kernel"].parameters:
assert isinstance(k, wrapper["gpy_kernel_type"])
assert k.input_dim == 1
else:
assert isinstance(emukit_gp.gpy_model.kern, wrapper["gpy_kernel_type"])
def test_create_emukit_model_from_gpy_model_raises_warns():
input_dim = 2
gpy_kernel = GPy.kern.RBF(input_dim=input_dim)
gpy_model = GPy.models.GPRegression(kernel=gpy_kernel, X=np.ones([3, input_dim]), Y=np.ones([3, 1]))
bounds = input_dim * [(0, 1)]
measure = LebesgueMeasure.from_bounds(bounds=bounds)
# Neither measure nor bounds given
with pytest.raises(ValueError):
create_emukit_model_from_gpy_model(gpy_model=gpy_model)
# both measure and bounds are given. Bounds will be ignored.
with pytest.warns(UserWarning):
create_emukit_model_from_gpy_model(gpy_model=gpy_model, integral_bounds=bounds, measure=measure)
def test_base_gp_gpy_raises(gpy_prodbrownian):
incompatible_offset = -3
n_dim = 2
dat = data(n_dim=n_dim)
kern = ProductBrownianGPy(variance=1.0, input_dim=n_dim, offset=incompatible_offset)
measure = LebesgueMeasure.from_bounds(bounds=n_dim * [(0, 1)])
qkern = QuadratureProductBrownianLebesgueMeasure(brownian_kernel=kern, measure=measure)
# this GPy model and hence the emukit base_gp wrapper are not compatible with the kernel wrapper
# for offsets other than zero.
gpy_model = GPy.models.GPRegression(kernel=kern.gpy_brownian, X=dat[0], Y=dat[1])
with pytest.raises(ValueError):
BaseGaussianProcessGPy(kern=qkern, gpy_model=gpy_model)
|
py
|
1a56e3a215ef1039cd2f7700829a745d36bfdada
|
# TODO(naman) figure out how to either disable runtime hooks for other tests,
# or run pkgexploration tests in a separate process
import pytest
from . import runtime
# initialize runtime hooks at import time:
# as early as possible during pytest initialization
RUNTIME_REFMAP = runtime.patch()
@pytest.fixture(scope='session', autouse=True)
def runtime_refmap():
return RUNTIME_REFMAP
|
py
|
1a56e44127c1e507eb31f6cd1df573183adf14e0
|
#!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import argparse
import bisect
import getopt
import os
import re
import subprocess
import sys
symbolizers = {}
DEBUG = False
demangle = False
binutils_prefix = None
sysroot_path = None
binary_name_filter = None
fix_filename_patterns = None
logfile = sys.stdin
allow_system_symbolizer = True
force_system_symbolizer = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
if fix_filename_patterns:
for path_to_cut in fix_filename_patterns:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
def sysroot_path_filter(binary_name):
return sysroot_path + binary_name
def is_valid_arch(s):
return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s",
"armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"]
def guess_arch(addr):
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
return 'x86_64'
else:
return 'i386'
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.default_arch = default_arch
self.system = system
self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=%s' % demangle,
'--functions=linkage',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
if self.system == 'Darwin':
for hint in self.dsym_hints:
cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print(' '.join(cmd))
try:
result = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
bufsize=0,
universal_newlines=True)
except OSError:
result = None
return result
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print(symbolizer_input)
self.pipe.stdin.write("%s\n" % symbolizer_input)
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') or
not file_name.startswith('??')):
# Append only non-trivial frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
self.output_terminator = -1
def open_addr2line(self):
addr2line_tool = 'addr2line'
if binutils_prefix:
addr2line_tool = binutils_prefix + addr2line_tool
cmd = [addr2line_tool, '-fi']
if demangle:
cmd += ['--demangle']
cmd += ['-e', self.binary]
if DEBUG:
print(' '.join(cmd))
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
bufsize=0,
universal_newlines=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
lines = []
try:
self.pipe.stdin.write("%s\n" % offset)
self.pipe.stdin.write("%s\n" % self.output_terminator)
is_first_frame = True
while True:
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
if is_first_frame:
is_first_frame = False
elif function_name in ['', '??']:
assert file_name == function_name
break
lines.append((function_name, file_name))
except Exception:
lines.append(('??', '??:0'))
return ['%s in %s %s' % (addr, function, fix_filename(file)) for (function, file) in lines]
class UnbufferedLineConverter(object):
"""
Wrap a child process that responds to each line of input with one line of
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
if close_stderr:
dev_null = os.open('/dev/null', 0)
os.dup2(dev_null, 2)
os.execvp(args[0], args)
else:
# Disable echoing.
attr = termios.tcgetattr(fd)
attr[3] = attr[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, attr)
# Set up a file()-like interface to the child process
self.r = os.fdopen(fd, "r", 1)
self.w = os.fdopen(os.dup(fd), "w", 1)
def convert(self, line):
self.w.write(line + "\n")
return self.readline()
def readline(self):
return self.r.readline().rstrip()
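# Usage sketch (hypothetical, POSIX-only since it relies on pty):
#     ulc = UnbufferedLineConverter(['cat'])
#     ulc.convert('hello')   # -> 'hello', one line in, one line out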
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary, arch):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
self.arch = arch
self.open_atos()
def open_atos(self):
if DEBUG:
print('atos -o %s -arch %s' % (self.binary, self.arch))
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.atos = UnbufferedLineConverter(cmdline, close_stderr=True)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
atos_line = self.atos.convert('0x%x' % int(offset, 16))
while "got symbolicator for" in atos_line:
atos_line = self.atos.readline()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match(r'^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print('atos_line: ', atos_line)
if match:
function_name = match.group(1)
function_name = re.sub(r'\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary, arch):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary, arch)
elif system == 'Linux' or system == 'FreeBSD':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = open(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if not cur_function_addr in self.symbols.keys():
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
if addr in self.addresses.keys():
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print(result)
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset, arch):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
# a single symbolizer binary.
# On Darwin, if the dsym hint producer is present:
# 1. check whether we've seen this binary already; if so,
# use |llvm_symbolizers[binary]|, which has already loaded the debug
# info for this binary (might not be the case for
# |last_llvm_symbolizer|);
# 2. otherwise check if we've seen all the hints for this binary already;
# if so, reuse |last_llvm_symbolizer| which has the full set of hints;
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
result = None
if not force_system_symbolizer:
if not binary in self.llvm_symbolizers:
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
self.system, arch, self.dsym_hints)
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
else:
symbolizers[binary] = ChainSymbolizer([])
if result is None:
if not allow_system_symbolizer:
raise Exception('Failed to launch or use llvm-symbolizer.')
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary, arch))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def get_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
return [self.current_line]
else:
result = []
for symbolized_frame in symbolized_lines:
result.append(' #%s %s' % (str(self.frame_no), symbolized_frame.rstrip()))
self.frame_no += 1
return result
def process_logfile(self):
self.frame_no = 0
for line in logfile:
processed = self.process_line(line)
print('\n'.join(processed))
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
r'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
return [self.current_line]
if DEBUG:
print(line)
_, frameno_str, addr, binary, offset = match.groups()
arch = ""
# Arch can be embedded in the filename, e.g.: "libabc.dylib:x86_64h"
colon_pos = binary.rfind(":")
if colon_pos != -1:
maybe_arch = binary[colon_pos+1:]
if is_valid_arch(maybe_arch):
arch = maybe_arch
binary = binary[0:colon_pos]
if arch == "":
arch = guess_arch(addr)
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset, arch)
return self.get_symbolized_lines(symbolized_line)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
parser.add_argument('--force-system-symbolizer', action='store_true',
help='don\'t use llvm-symbolizer')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
if args.demangle:
demangle = True
if args.s:
binary_name_filter = sysroot_path_filter
sysroot_path = args.s
if args.c:
binutils_prefix = args.c
if args.logfile:
logfile = args.logfile
else:
logfile = sys.stdin
if args.force_system_symbolizer:
force_system_symbolizer = True
if force_system_symbolizer:
assert(allow_system_symbolizer)
loop = SymbolizationLoop(binary_name_filter)
loop.process_logfile()
|
py
|
1a56e523656167bc56f09a9849d622ed2cff3919
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Adapted from https://mypyc.readthedocs.io/en/latest/getting_started.html#example-program
import time
def fib(n: int) -> int:
if n <= 1:
return n
else:
return fib(n - 2) + fib(n - 1)
t0 = time.time()
fib(32)
if "__file__" in locals():
print("interpreted")
else:
print("compiled")
print(time.time() - t0)
|
py
|
1a56e537f8ff448e3eeffe63909219393a8d189d
|
#!/usr/bin/python3
from pathlib import Path
import pytest
priv_key = "0x416b8a7d9290502f5661da81f0cf43893e3d19cb9aea3c426cfb36e8186e9c09"
addr = "0x14b0Ed2a7C4cC60DD8F676AE44D0831d3c9b2a9E"
@pytest.fixture(autouse=True)
def no_pass(monkeypatch):
monkeypatch.setattr("brownie.network.account.getpass", lambda x: "")
def test_save(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
assert Path(tmpdir + "/temp.json").exists()
accounts._reset()
def test_save_nopath(accounts, tmpdir):
a = accounts.add(priv_key)
path = Path(a.save("temp", True))
assert path.exists()
path.unlink()
Path(a.save("temp"))
assert path.exists()
path.unlink()
accounts._reset()
def test_save_overwrite(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
with pytest.raises(FileExistsError):
a.save(tmpdir + "/temp.json")
a.save(tmpdir + "/temp.json", True)
accounts._reset()
def test_load(accounts, tmpdir):
a = accounts.add(priv_key)
a.save(tmpdir + "/temp.json")
accounts._reset()
assert a not in accounts
a = accounts.load(tmpdir + "/temp.json")
assert a.address == addr
def test_load_nopath(accounts, tmpdir):
a = accounts.add(priv_key)
path = a.save("temp")
accounts._reset()
a = accounts.load("temp")
assert a.address == addr
Path(path).unlink()
def test_load_not_exists(accounts, tmpdir):
with pytest.raises(FileNotFoundError):
accounts.load(tmpdir + "/temp.json")
with pytest.raises(FileNotFoundError):
accounts.load("temp")
|
py
|
1a56e5824fbefc9b83d449aa10e13ffeaa4d445a
|
import csv
import os
from datetime import datetime
from time import time
modNomeReduzido = {
"Ampla concorrência": "AC",
"A3 - o candidato tenha cursado integralmente todas as séries do 2º ciclo do Ensino Fundamental, ou seja, do 6º ao 9º ano, e todas as séries, do Ensino Médio em escolas públicas de todo o território nacional.": "EP",
"COTAS - Escolas Públicas - Lei Estadual no 6.542, de 7 de dezembro de 2004": "EP",
"Candidato (s) Oriundos da rede pública de ensino.": "EP",
"Candidato (s) que tenham cursado integral, exclusiva e regularmente os anos finais do Ensino Fundamental (6º ao 9º ano) e todo o Ensino Médio em escolas da rede pública estadual ou municipal, excluindo-se os candidatos que tenham concluído curso de nível superior ainda que pendente a colação de grau.": "EP",
"Candidato (s) que tenham cursado integralmente o Ensino Médio em instituições públicas de ensino": "EP",
"Candidatos que tenham cursado integral e exclusivamente os ensinos fundamental e médio em estabelecimentos da rede pública de ensino.": "EP",
"Candidatos que tenham cursado todo o Ensino Médio e os últimos quatro anos do Ensino Fundamental em Escola Pública e que não se autodeclararam negros.": "EP",
"Candidatos que tenham cursado todo o Ensino Médio e os últimos quatro anos do Ensino Fundamental em Escola Pública e que se autodeclararam negros.": "EP",
"Candidatos que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "EP",
"Cota Social - Candidatos que frequentaram integralmente todas as séries do Ensino Médio ou equivalente em instituições públicas brasileiras de ensino.": "EP",
"com procedência de no mínimo sete anos de estudos regulares ou que tenham realizado curso supletivo ou outra modalidade de ensino equivalente, em estabelecimento da Rede Pública de Ensino do Brasil, compreendendo parte do ensino fundamental (6º ao 9º ano) e Ensino Médio completo (incluindo os cursos técnicos com duração de 4 anos) ou ter realizado curso supletivo ou outra modalidade de ensino equivalente.": "EP",
"que cursaram o Ensino Médio, integral e exclusivamente, em escola pública do Brasil, e que não tenham concluído curso de graduação": "EP",
"que frequentaram integralmente as 4 últimas séries do Ensino Fundamental e todas as séries do Ensino Médio em instituições públicas brasileiras de ensino.": "EP",
"que independentemente da renda, tenham cursado integralmente o ensino médio em escolas públicas. (L3)": "EP",
"que tenham cursado integralmente o Ensino Médio em escolas públicas.": "EP",
"que tenham cursado integralmente o Ensino Médio em instituições públicas de ensino ou tenham obtido certificado de conclusão com base no resultado do Exame Nacional do Ensino Médio, ENEM, ou do Exame Nacional de Certificação de Competências de Jovens e Adultos, ENCCEJA, ou de exames de certificação de competência ou de avaliação de jovens e adultos realizados pelos sistemas de ensino e não possuam curso superior concluído ou não estejam matriculados em curso superior.": "EP",
"que tenham cursado integralmente o Ensino Médio em instituições públicas e gratuitas de ensino": "EP",
"que tenham cursado o ensino fundamental e médio integralmente em escola pública": "EP",
"que tenham cursado todo o ensino médio e pelo menos dois anos do Ensino Fundamental II em escola pública": "EP",
"Candidato (s) que cursaram o ensino médio, integral e exclusivamente, em escola pública do Brasil e que não tenham concluído curso de graduação.": "EP",
"CANDIDATOS QUE TENHAM CURSADO, NA REDE PÚBLICA, OS ÚLTIMOS QUATRO ANOS DO ENSINO FUNDAMENTAL E TODO O ENSINO MÉDIO": "EP",
"demais de escola pública": "EP",
"que tenham cursado parcialmente o ensino médio em escola pública (pelo menos um ano com aprovação) ou em escolas de direito privado sem fins lucrativos, cujo orçamento da instituição seja proveniente do poder público, em pelo menos 50%.": "EP",
"membros de grupos indígenas": "INDIGENA",
"Candidato (s) Indígenas": "INDIGENA",
"indígenas aldeados": "INDIGENA",
"indígenas, condição que deve ser comprovada mediante apresentação do RANI (Registro Administrativo de Nascimento de Indígena) ou declaração atestada pela FUNAI.": "INDIGENA",
"Candidato (s) indígena": "INDIGENA",
"Candidatos indígenas": "INDIGENA",
"Indígenas": "INDIGENA",
"Candidatos autodeclarados indígenas que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "INDIGENA + EP",
"INDÍGENAS que cursaram integralmente o Ensino Médio em escolas públicas": "INDIGENA + EP",
"Candidatos Indígenas que tenham cursado todo o 2º Ciclo do Ensino Fundamental e o Ensino Médio exclusivamente em escola pública, que tenham renda bruta familiar mensal inferior ou igual a 04 (quatro) vezes o valor do salário mínimo nacional vigente no ato da matrícula e que não possuam título de graduação.": "INDIGENA + RENDA + EP",
"Candidatos Indígenas, de baixa renda que sejam egressos de escola pública:": "INDIGENA + RENDA + EP",
"Candidatos autodeclarados indígenas, com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "INDIGENA + RENDA + EP",
"Candidato (s) economicamente hipossuficientes indígenas": "INDIGENA + RENDA",
"Candidatos que tenham cursado todo o Ensino Médio e os últimos quatro anos do Ensino Fundamental em Escola Pública e que sejam índios reconhecidos pela FUNAI ou moradores de comunidades remanescentes de quilombos registrados na Fundação Cultural Palmares.": "INDIGENA/QUILOMBOLA + EP",
"Categoria VI - Pessoas com deficiência": "PCD",
"Candidato (s) Pessoas com Deficiências - PCD": "PCD",
"Candidato (s) com deficiência": "PCD",
"Candidato (s) com deficiência.": "PCD",
"Candidatos Candidatos Pessoa com Deficiência independentemente da sua condição acadêmica prévia declarada (pública ou privada)": "PCD",
"Candidatos com deficiência:": "PCD",
"Cota para candidatos com deficiência": "PCD",
"com Deficiência, Transtorno do Espectro Autista, Altas Habilidades/Superdotação e/ou estudantes que sejam público alvo da educação especial": "PCD",
"com deficiência (Denominada A1)": "PCD",
"com deficiência que concluíram o Ensino Médio, independente do percurso de formação.": "PCD",
"com deficiência": "PCD",
"com deficiência: será reservada uma vaga, por curso e turno, qualquer que seja a sua procedência escolar.": "PCD",
"Candidatos com deficiência": "PCD",
"com deficiência PROAAf": "PCD",
"Pessoa com Deficiência": "PCD",
"Reserva de vagas para candidatos com deficiência (PCD)": "PCD",
"Candidatos com deficiência que cursaram todo o ensino médio em escolas públicas.": "PCD + EP",
"Candidatos com deficiência que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + EP",
"com deficiência que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + EP",
"Candidatos com deficiência que tenham renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + RENDA + EP",
"com deficiência que tenham renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + RENDA + EP",
"Candidatos com deficiência, transtorno do espectro autista e altas habilidades que tenham cursado todo o 2º Ciclo do Ensino Fundamental e o Ensino Médio exclusivamente em escola pública, que tenham renda bruta familiar mensal inferior ou igual a 04 (quatro) vezes o valor do salário mínimo nacional vigente no ato da matrícula e que não possuam título de graduação.": "PCD + RENDA + EP",
"Candidatos com deficiência autodeclarados pretos ou pardos que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + PRETO/PARDO + EP",
"Candidatos com deficiência autodeclarados pretos ou pardos, que tenham renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PCD + PRETO/PARDO + RENDA + EP",
"autodeclarados Pretos, Pardos e Indígenas": "PPI",
"A2 - Candidatos negros, indígenas e quilombolas com comprovação de carência socioeconômica": "PPI/QUILOMBOLA + RENDA",
"Candidato (s) NEGROS, INDÍGENAS OU QUILOMBOLAS COM COMPROVAÇÃO DE CARÊNCIA SOCIOECONÔMICA": "PPI/QUILOMBOLA + RENDA",
"Candidatos Negros ou Indígenas ou Quilombolas com comprovação de carência socioeconômica": "PPI/QUILOMBOLA + RENDA",
"Candidatos negros, indígenas ou oriundos de comunidades quilombolas com comprovação de carência socioeconômica.": "PPI/QUILOMBOLA + RENDA",
"Candidatos autodeclarados pretos, pardos ou indígenas que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PPI + EP",
"Candidatos que tenha cursado integral e exclusivamente os ensinos fundamental e médio em estabelecimentos da rede pública de ensino e que se autodeclarem negros.": "PPI + EP",
"NEGROS (pretos e pardos) que cursaram integralmente o Ensino Médio em escolas públicas (Banca avaliadora obrigatória)": "PPI + EP",
"autodeclarados pretos, pardos ou indígenas que, independentemente da renda, tenham cursado integralmente o ensino médio em escolas públicas. (L4)": "PPI + EP",
"A2 - Candidatos negros ou indígenas com comprovação de carência socioeconômica": "PPI + RENDA",
"Candidato (s) negros ou indígenas com comprovação de carência socioeconômica": "PPI + RENDA",
"Candidatos autodeclarados pretos, pardos ou indígenas, com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PPI + RENDA + EP",
"autodeclarados pretos, pardos ou indígenas com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo, que tenham cursado integralmente o ensino médio em escolas públicas. (L2)": "PPI + RENDA + EP",
"Candidatos com deficiência autodeclarados pretos, pardos ou indígenas que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PPI + PCD + EP",
"Candidatos com deficiência autodeclarados pretos, pardos ou indígenas, que tenham renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo e que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012)": "PPI + PCD + RENDA + EP",
"Negros (pretos ou pardos) (Denominada A2)": "PRETO/PARDO",
"Negros": "PRETO/PARDO",
"autodeclarados negros de forma irrestrita, independente do percurso de formação.": "PRETO/PARDO",
"negros, entendidos como candidatos que possuem fenótipo que os caracterizem, na sociedade, como pertencentes ao grupo racial negro": "PRETO/PARDO",
"Candidatos autodeclarados pretos ou pardos que, independentemente da renda (art. 14, II, Portaria Normativa nº 18/2012), tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PRETO/PARDO + EP",
"Cota Sociorracial: candidatos(as) autodeclarados(as) negros(as) e que tenham frequentado integralmente todas as séries do Ensino Médio ou equivalente em instituições públicas brasileiras de ensino.": "PRETO/PARDO + EP",
"autodeclarados negros (somatório das categorias pretos e pardos, segundo classificação étnico-racial adotada pelo IBGE) que tenham cursado o ensino fundamental 2 (do 6º ao 9º ano) e ensino medio completo ( incluindo os cursos técnicos com duração de 4 anos) ou ter realizado curso supletivo ou outra modalidade de ensino equivalente, em estabelecimento da Rede Pública de Ensino do Brasil. Vedado aos portadores de diploma de nível superior": "PRETO/PARDO + EP",
"autodeclarados negros que tenham cursado todo o 2º ciclo do Ensino Fundamental (5ª a 8ª séries) e todo o Ensino Médio, única e exclusivamente, na rede pública de ensino no Brasil.": "PRETO/PARDO + EP",
"que se declararem negros que tenham cursado todo o ensino médio e pelo menos dois anos do Ensino Fundamental II em escola pública seguindo a ordem de classificação": "PRETO/PARDO + EP",
"pretos e pardos, que tenham cursado integralmente o Ensino Médio em escolas públicas.": "PRETO/PARDO + EP",
"Candidato (s) economicamente hipossuficientes negros e pardos": "PRETO/PARDO + RENDA",
"Candidatos Negros, de baixa renda que sejam egresso de escola pública:": "PRETO/PARDO + RENDA + EP",
"Candidatos autodeclarados pretos ou pardos, com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "PRETO/PARDO + RENDA + EP",
"Categoria I - Candidatos declarados negros, de baixa renda e egressos de escola pública": "PRETO/PARDO + RENDA + EP",
"Candidatos Negros que tenham cursado todo o 2º Ciclo do Ensino Fundamental e o Ensino Médio exclusivamente em escola pública, que tenham renda bruta familiar mensal inferior ou igual a 04 (quatro) vezes o valor do salário mínimo nacional vigente no ato da matrícula e que não possuam título de graduação.": "PRETO/PARDO + RENDA + EP",
"Categoria II - Candidatos declarados quilombolas, de baixa renda e egressos de escola pública": "QUILOMBOLA + RENDA + EP",
"Categoria III - Candidatos declarados indígenas, de baixa renda e egressos de escola pública": "INDIGENA + RENDA + EP",
"Candidato (s) Quilombolas": "QUILOMBOLA",
"membros de comunidade quilombola": "QUILOMBOLA",
"de comunidades remanescentes de quilombos ou comunidades identitárias tradicionais": "QUILOMBOLA",
"Candidato (s) economicamente hipossuficientes": "RENDA",
"Candidato (s) que tenham cursado, na rede pública, os últimos quatro anos do ensino fundamental e todo o ensino médio e com comprovação de carência socioeconômica": "RENDA + EP",
"Candidatos Egressos da Escola Pública, de baixa renda:": "RENDA + EP",
"Candidatos com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo que tenham cursado integralmente o ensino médio em escolas públicas (Lei nº 12.711/2012).": "RENDA + EP",
"Candidatos que tenham cursado na rede pública os últimos quatro anos do ensino fundamental e todo o ensino médio e com comprovação de carência socioeconômica.": "RENDA + EP",
"Categoria V - Outros candidatos de baixa renda e egressos de escola pública": "RENDA + EP",
"com renda familiar bruta per capita igual ou inferior a 1,5 salário mínimo, que tenham cursado integralmente o ensino médio em escolas públicas. (L1)": "RENDA + EP",
"A3 - Candidatos que tenham cursado na rede pública os últimos quatro anos do ensino fundamental e todo o ensino médio e com comprovação de carência socioeconômica": "RENDA + EP",
"Candidatos que tenham cursado na rede pública os últimos quatro anos do ensino fundamental e todo o ensino médio e com comprovação de carência socioeconômica": "RENDA + EP",
"Pessoas Transgêneras em situação de Vulnerabilidade Econômica.": "TRANS",
"transexuais, travestis e transgêneros": "TRANS",
"Pessoas Transgêneras, independentemente de renda familiar": "TRANS",
"Candidatos Transexuais, travestis e transgêneros que tenham cursado todo o 2º Ciclo do Ensino Fundamental e o Ensino Médio exclusivamente em escola pública, que tenham renda bruta familiar mensal inferior ou igual a 04 (quatro) vezes o valor do salário mínimo nacional vigente no ato da matrícula e que não possuam título de graduação.": "TRANS + RENDA + EP"
}
modNomeReduzido = {k.lower(): v for k, v in modNomeReduzido.items()}
class Aluno:
def __init__(self, m):
self.codigo, self.nome, self.posicao, self.nota, self.modNome, self.bonus = m
# Reduces modality names based on the modNomeReduzido dict
modNomeKey = self.modNome.lower()
if modNomeKey in modNomeReduzido:
self.modNome = modNomeReduzido[modNomeKey]
def __str__(self):
s = [
"\t{}:".format(self.modNome),
"\t\t{:>3}: {:>6} - {}".format(self.posicao, self.nota, self.nome)
]
return "\n".join(s)
class Curso:
def __init__(self, info, alunos):
self.campusUF, self.iesNome, self.iesSG, self.campusCidade = info[:4]
self.campusNome, self.cursoNome, self.cursoGrau, self.cursoTurno, self.vagasTotais = info[4:]
self.alunos = [Aluno(alunos[i:i+6]) for i in range(0, len(alunos), 6)]
def __str__(self):
s = [
"{} ({}) - {}, {}, {}".format(self.iesNome, self.iesSG, self.campusNome, self.campusCidade, self.campusUF),
"{}, {}, {}".format(self.cursoNome, self.cursoGrau, self.cursoTurno),
]
alunos = [str(m) for m in self.alunos]
        # Only print the modality name if it's the first occurrence
last = alunos[0].split("\n")[0]
for i in range(1, len(alunos)):
if alunos[i].split("\n")[0] == last:
alunos[i] = alunos[i].split("\n")[1]
else:
last = alunos[i].split("\n")[0]
return "\n".join(s+alunos)
directory = "data"
filename = input("Filename (without extension): /{}/".format(directory)).strip()
t0 = time()
##################################################
# Get all course info from .csv file
try:
with open("all_courses.csv", "r", encoding="UTF-8") as csvFile:
csvFileReader = csv.reader(csvFile, delimiter=";")
cursosInfo = {oferta[-1]: oferta[:-1] for oferta in [tuple(l) for l in csvFileReader]}
except FileNotFoundError:
print("File /all_courses.csv not found.")
exit()
# Read csv and process strings (via class constructors)
try:
with open(os.path.join(directory, filename + ".csv"), "r", encoding="UTF-8") as csvFile:
csvFileReader = csv.reader(csvFile, delimiter=";")
cursos = [Curso(cursosInfo[c[0]], c[1:]) for c in csvFileReader]
except FileNotFoundError:
print("File /{}/{}.csv not found.".format(directory, filename))
exit()
# Sort lexicographically
cursos = sorted(cursos, key=lambda x: (x.campusUF, x.iesNome, x.iesSG, x.campusCidade, x.campusNome, x.cursoNome))
# Write to .txt
with open(os.path.join(directory, filename + ".txt"), "w+", encoding="UTF-8") as humanFile:
for i, curso in enumerate(cursos):
nl = str(curso).index("\n")
        # Only write iesNome if it's the first occurrence
if i == 0 or (str(curso)[:nl] != str(cursos[i-1]).split("\n")[0]):
humanFile.write("="*50 + "\n")
humanFile.write(str(curso)[:nl] + "\n")
humanFile.write("="*50 + "\n")
humanFile.write(str(curso)[nl+1:] + "\n")
humanFile.write("\n")
print("Written {} courses to '{}.txt' in {:.1f}s.".format(len(cursos), directory+"/"+filename, time()-t0))
|
py
|
1a56e6271fc1ba4ad8c23f2015d984f3457e91ab
|
"""
data collector
"""
from random import randint
from time import sleep
import csv
import re
import concurrent.futures
import datetime
from bs4 import BeautifulSoup
import urllib3
import requests
from utils.utilities import ProjectCommon
OUTPUT_FILE_PATH = 'reviews_with_ranks.csv'
SCRAPER_FINAL_OUTPUT = []
MOVIE_REVIEW_URLS = []
class Anonymize:
"""
anonymize class
"""
def __init__(self):
self.headers = [{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)'
' AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/50.0.2661.102 Safari/537.36'},
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; '
'rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5 '
'(.NET CLR 3.5.30729)'},
{'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'MyAppName/1.0.0 ([email protected])'}]
@staticmethod
def sleeper():
"""
sleeper method used to sleep between requests
:return:
"""
sleep(randint(2, 5))
def randomize_request_headers(self):
"""
method to randomize request headers for each request
:return:
"""
return self.headers[randint(0, len(self.headers) - 1)]
def movie_review_url_collector():
"""
function collecting urls with the movie reviews
:return:0
"""
start_page_urls = ['https://www.csfd.cz/zebricky/nejhorsi-filmy/?show=complete',
'https://www.csfd.cz/zebricky/nejlepsi-filmy/?show=complete']
anonymize = Anonymize()
for start_page in start_page_urls:
page = requests.get(start_page, headers=anonymize.randomize_request_headers())
soup = BeautifulSoup(page.content, 'html.parser')
movie_review_url = soup.find_all('td', attrs={'class': 'film'})
for url_item in movie_review_url[:300]:
children = url_item.findChildren("a", recursive=False)
movie_name = str(children).split("/")[2]
for random_index in ([2, 3, 4, 5, 6, 7]):
review_page = str(random_index)
MOVIE_REVIEW_URLS.append('https://www.csfd.cz/film/{}/komentare/strana-{}'.
format(movie_name, review_page))
return 0
def movie_review_scraper(url_to_scrape):
"""
function getting the url from the argument, requesting the raw html
and scraping the movie review html code
:param url_to_scrape: url
:return:None
"""
anonymize = Anonymize()
print(f'{datetime.datetime.now()} started scraping {url_to_scrape}')
try:
anonymize.sleeper()
        page = requests.get(url_to_scrape, headers=anonymize.randomize_request_headers())
if page.status_code == 200:
# the <li> html tag structure we're scraping in loops:
#
# variation #1 with star count as rank in the img alt text tag:
# <li id = "comment-796722" >
# <div class ="info" >
# <a href = "" > all reviewer's reviews </a>/
# <a href = "" > <img src = "" class ="" ></a>
# </div>
# <h5 class = "author" > <a href="" > reviewers nickname </a></h5>
# <img src = "" class ="rating" width="32" alt="****" / >
# <p class ="post" > movie review
# <span class ="date desc" > date of review </span></p>
# </li>
#
# variation #2 with 1 word ranking ("odpad!" translates to "junk") in the strong tag:
# <li id = "comment-9092651" >
# <div class ="info" >
# <a href = "" > all reviewer's reviews </a>/
# <a href = "" > <img src = "" class ="" ></a>
# </div>
# <h5 class ="author" > <a href="" > reviewers nickname </a></h5>
# <strong class ="rating" > odpad! </strong>
# <p class ="post" > movie review
# <span class ="date desc" > date of review </span></p>
# </li>
soup = BeautifulSoup(page.content, 'html.parser')
_l_substring_to_trim_from = '<p class="post">'
_r_substring_to_trim_to = '<span class="date desc">'
for soup_item in soup.find_all("li", {"id": re.compile(r"comment-*")}):
scraper_temp_output = []
img = soup_item.findChildren("img",
attrs={'class': 'rating'})
strong = soup_item.findChildren(["strong", "p"],
attrs={'class': ['rating', 'post']})
if strong and str(strong).startswith('[<strong class="rating">odpad!</strong>'):
_r_trim = len(str(strong)) - str(strong).rfind(_r_substring_to_trim_to)
_l_trim = str(strong).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
scraper_temp_output.append({'rank': -2,
'review': str(strong)[_l_trim:-_r_trim]})
else:
_r_trim = len(str(img)) - str(img).rfind(_r_substring_to_trim_to)
_l_trim = str(img).rfind(_l_substring_to_trim_from) + len(_l_substring_to_trim_from)
if img and str(img).startswith('[<img alt="*"'):
scraper_temp_output.append({'rank': -2,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="**"'):
scraper_temp_output.append({'rank': -1,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="***"'):
scraper_temp_output.append({'rank': 1,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="****"'):
scraper_temp_output.append({'rank': 2,
'review': str(img)[_l_trim:-_r_trim]})
elif img and str(img).startswith('[<img alt="*****"'):
scraper_temp_output.append({'rank': 2,
'review': str(img)[_l_trim:-_r_trim]})
for item in scraper_temp_output:
raw_review = item.get('review')
review = ProjectCommon.remove_html(str(raw_review).lower())
rank = item.get('rank')
SCRAPER_FINAL_OUTPUT.append((review, rank))
            print(f'{datetime.datetime.now()} finished scraping {url_to_scrape}')
        else:
            print(f'{datetime.datetime.now()} Invalid request status code '
                  f'{str(page.status_code)} for {url_to_scrape}')
except urllib3.exceptions.ConnectionError as connerr:
print(str(connerr))
except Exception as exc:
print(str(exc))
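# Illustrative note (not part of the original scraper): the branches above map
# csfd.cz ratings to coarse sentiment ranks roughly as
#   'odpad!' or one star -> -2, two stars -> -1, three stars -> 1, four or five stars -> 2.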
if __name__ == "__main__":
# fill the list with urls used for movie data scraping
movie_review_url_collector()
# process the list items in a multi-threaded pool based
# scraper function movie_review_scraper
with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
FUTURE_TO_URL = {executor.submit(movie_review_scraper, url):
url for url in MOVIE_REVIEW_URLS}
for future in concurrent.futures.as_completed(FUTURE_TO_URL):
url = FUTURE_TO_URL[future]
try:
data = future.result()
except Exception as exc:
print('%r generated an exception: %s' % (url, exc))
# write to OUTPUT_FILE_PATH csv file the scraped movie review data
with open(OUTPUT_FILE_PATH, 'w', encoding='utf8', newline='\n') as fw:
writer = csv.writer(fw, escapechar='/', quoting=csv.QUOTE_NONNUMERIC)
writer.writerows(SCRAPER_FINAL_OUTPUT)
print("Movie review data collection phase complete.")
|
py
|
1a56e6450f86d82ba120d0d9c2d9967988d62442
|
import os
import numpy as np
from scipy.io import netcdf_file
from pychemia.utils.periodic import atomic_symbol
from .htmlparser import MyHTMLParser
"""
This module provides general routines used by abipython
but not requiring the abipython classes
"""
__author__ = "Guillermo Avendano-Franco"
__copyright__ = "Copyright 2016"
__version__ = "1.1"
__maintainer__ = "Guillermo Avendano-Franco"
__email__ = "[email protected]"
__status__ = "Development"
__date__ = "May 13, 2016"
def netcdf2dict(filename):
"""
Read a NetCDF file and create a python dictionary with
numpy arrays for variables
Args:
filename:
NetCDF filename
"""
if not os.path.isfile(filename):
print('ERROR: No such file: ', filename)
return None
output = {}
netcdffile = netcdf_file(filename, 'r', mmap=False)
for ii in netcdffile.variables.keys():
output[ii] = netcdffile.variables[ii][:]
netcdffile.close()
return output
def psp_name(atomicnumber, exchange, kind):
"""
Return the filename of a certain PSP given the
atomic number, exchange ('LDA' or 'GGA')
and kind ('FHI','TM')
:param atomicnumber: (int) Atomic number
:param exchange: (str) 'LDA' or 'GGA'
:param kind: (str) Source of Pseudopotentials
:return:
"""
atom_symbol = str(atomic_symbol(atomicnumber))
if kind == 'FHI' and exchange == 'LDA':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.LDA.fhi'
elif kind == 'FHI' and exchange == 'GGA':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.GGA.fhi'
elif kind == 'CORE' and exchange == 'LDA':
filename = str(atomicnumber) + atom_symbol.lower() + '.1s_psp.mod'
elif kind == 'GTH' and exchange == 'LDA':
filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pspgth'
elif kind == 'TM' and exchange == 'LDA':
filename = str(atomicnumber) + atom_symbol.lower() + '.pspnc'
elif kind == 'AE' and exchange == 'DEN':
filename = '0.' + str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.density.AE'
elif kind == 'FC' and exchange == 'DEN':
filename = str(atomicnumber).zfill(2) + '-' + atom_symbol + '.8.fc'
elif kind == 'PAW' and exchange == 'GGA':
filename = 'JTH-PBE-atomicdata-0.2/' + atom_symbol + '.GGA_PBE-JTH.xml'
elif kind == 'PAW' and exchange == 'LDA':
filename = 'JTH-LDA-atomicdata-0.2/' + atom_symbol + '.LDA_PW-JTH.xml'
elif kind == 'HGH' and exchange == 'GGA':
filename = str(atomicnumber).zfill(2) + atom_symbol.lower() + '.pbe_hgh'
elif kind == 'ONC' and exchange == 'PBE':
filename = 'pbe_s_sr' + os.sep + atom_symbol + '.psp8'
else:
print('The combination of exchange=%s and kind=%s is not known' % (exchange, kind))
filename = ''
return filename
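# Illustrative examples (not part of the original module; they assume that
# pychemia's atomic_symbol(14) returns 'Si'):
#   psp_name(14, 'LDA', 'FHI') -> '14-Si.LDA.fhi'
#   psp_name(14, 'GGA', 'PAW') -> 'JTH-PBE-atomicdata-0.2/Si.GGA_PBE-JTH.xml'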
def split_varname(varname):
if varname[-2:].isdigit():
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-2].isdigit() and varname[-1] == '?':
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-1].isdigit() and varname[-2] == '?':
prefix = varname[:-2]
suffix = varname[-2:]
elif varname[-1].isdigit():
prefix = varname[:-1]
suffix = varname[-1:]
elif varname[-1] == '?':
prefix = varname[:-1]
suffix = varname[-1:]
else:
prefix = varname
suffix = ''
return prefix, suffix
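# Illustrative examples (not part of the original module): dataset-suffixed ABINIT
# variable names are split into a (prefix, suffix) pair, e.g.
#   split_varname('acell1') -> ('acell', '1')
#   split_varname('xred12') -> ('xred', '12')
#   split_varname('ntypat') -> ('ntypat', '')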
def plot_simple(variables, varname):
from matplotlib.pylab import subplots
from numpy import arange, mean, apply_along_axis, linalg
from math import sqrt
fig, ax = subplots(nrows=1, ncols=1)
fig.set_size_inches(15, 4)
ndtset = variables['ndtset'][0]
lens = np.array([len(variables[varname + str(x + 1)]) for x in range(ndtset)])
x = arange(ndtset) + 1
if max(lens) == min(lens):
if lens[0] == 1:
y = np.array([variables['etotal' + str(x + 1)][0] for x in range(ndtset)])
elif lens[0] % 3 == 0:
y = np.array([mean(apply_along_axis(linalg.norm, 1, variables['fcart' + str(x + 1)].reshape((-1, 3))))
for x in range(ndtset)])
else:
y = np.array([sqrt(sum(variables['fcart' + str(x + 1)] ** 2)) for x in range(ndtset)])
ax.plot(x, y, 'ro')
ax.plot(x, y, 'b-')
ax.set_xlabel('Dataset')
ax.set_ylabel(varname)
ax.set_xlim(1, ndtset + 1)
if ndtset < 30:
ax.set_xticks(arange(ndtset + 1))
ax.grid(which='major', axis='both')
def abihelp(varname):
import json
hp = MyHTMLParser()
import pychemia.code.abinit as _pdca
rf = open(_pdca.__path__[0] + '/ABINIT_variables.json', 'r')
variables = json.load(rf)
rf.close()
if varname not in variables.keys():
print('ERROR: ' + varname + ' is not in the list of variables of ABINIT')
return
else:
abivar = variables[varname]
print(varname)
print('')
print('DEFINITION:', abivar['definition'])
print('SECTION: ', abivar['section'])
print('DEFAULT: ', hp.feed(abivar['default']))
print('')
print(hp.feed(abivar['text']))
print('')
|
py
|
1a56e6901f07236895d8ee3495e3a829b2383fc6
|
'''
Pattern
Reversed Mirrored Right triangle star pattern
Enter number of rows: 5
*****
****
***
**
*
'''
print('Reversed Mirrored Right Triangle star pattern: ')
rows=int(input('Enter number of rows: '))
for i in range(1,rows+1):
for j in range(1,i):
print(' ',end="")
for j in range(i,rows+1):
print('*',end='')
    print()
|
py
|
1a56e6add34b3fdf193a4452094a8d6eb4e8a754
|
#
# This is the Robotics Language compiler
#
# Parameters.py: Definition of the parameters for this package
#
# Created on: June 22, 2017
# Author: Gabriel A. D. Lopes
# Licence: Apache 2.0
# Copyright: 2014-2017 Robot Care Systems BV, The Hague, The Netherlands. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
py
|
1a56e6c6dcbd1a257c63e768c3fb9730a47db0ea
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: [email protected]
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from PIL import Image
def get_mask_pallete(npimg, dataset='ade20k'):
"""Get image color pallete for visualizing masks"""
# recovery boundary
if dataset == 'pascal_voc':
npimg[npimg==21] = 255
# put colormap
out_img = Image.fromarray(npimg.squeeze().astype('uint8'))
if dataset == 'ade20k':
out_img.putpalette(adepallete)
elif dataset == 'citys':
out_img.putpalette(citypallete)
elif dataset in ('detail', 'pascal_voc', 'pascal_aug'):
out_img.putpalette(vocpallete)
return out_img
def _get_voc_pallete(num_cls):
n = num_cls
pallete = [0]*(n*3)
for j in range(0,n):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
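# Worked example (illustrative, not part of the original module): with the bit
# interleaving above, class 1 maps to (128, 0, 0) and class 2 to (0, 128, 0),
# i.e. the standard PASCAL VOC colormap.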
vocpallete = _get_voc_pallete(256)
adepallete = [0, 0, 0, 120, 120, 120, 180, 120, 120, 6, 230, 230, 80, 50, 50, 4, 200, 3, 120, 120, 80, 140, 140, 140, 204, 5, 255, 230, 230, 230, 4, 250, 7, 224, 5, 255, 235, 255, 7, 150, 5, 61, 120, 120, 70, 8, 255, 51, 255, 6, 82, 143, 255, 140, 204, 255, 4, 255, 51, 7, 204, 70, 3, 0, 102, 200, 61, 230, 250, 255, 6, 51, 11, 102, 255, 255, 7, 71, 255, 9, 224, 9, 7, 230, 220, 220, 220, 255, 9, 92, 112, 9, 255, 204, 70, 3, 7, 255, 224, 255, 184, 6, 10, 255, 71, 255, 41, 10, 7, 255, 255, 224, 255, 8, 102, 8, 255, 255, 61, 6, 255, 194, 7, 255, 122, 8, 0, 255, 20, 255, 8, 41, 255, 5, 153, 6, 51, 255, 235, 12, 255, 160, 150, 20, 0, 163, 255, 140, 140, 140, 250, 10, 15, 20, 255, 0, 31, 255, 0, 255, 31, 0, 255, 224, 0, 153, 255, 0, 0, 0, 255, 255, 71, 0, 0, 235, 255, 0, 173, 255, 31, 0, 255, 11, 200, 200, 255, 82, 0, 0, 255, 245, 0, 61, 255, 0, 255, 112, 0, 255, 133, 255, 0, 0, 255, 163, 0, 255, 102, 0, 194, 255, 0, 0, 143, 255, 51, 255, 0, 0, 82, 255, 0, 255, 41, 0, 255, 173, 204, 70, 3, 173, 255, 0, 0, 255, 153, 255, 92, 0, 255, 0, 255, 255, 0, 245, 255, 0, 102, 224, 255, 8, 255, 0, 20, 255, 184, 184, 224, 255, 8, 0, 255, 61, 0, 71, 255, 255, 0, 204, 0, 255, 194, 0, 255, 82, 0, 10, 255, 0, 112, 255, 51, 0, 255, 0, 194, 255, 0, 122, 255, 0, 255, 163, 255, 153, 0, 0, 255, 10, 255, 112, 0, 143, 255, 0, 82, 0, 255, 163, 255, 0, 255, 235, 0, 8, 184, 170, 133, 0, 255, 0, 255, 92, 184, 0, 255, 255, 0, 31, 0, 184, 255, 204, 70, 3, 255, 0, 112, 92, 255, 0, 0, 224, 255, 112, 224, 255, 70, 184, 160, 163, 0, 255, 153, 0, 255, 71, 255, 0, 255, 0, 163, 255, 204, 0, 255, 0, 143, 0, 255, 235, 133, 255, 0, 255, 0, 235, 245, 0, 255, 255, 0, 122, 255, 245, 0, 10, 190, 212, 214, 255, 0, 0, 204, 255, 20, 0, 255, 255, 255, 0, 0, 153, 255, 224, 255, 8, 0, 255, 204, 41, 0, 255, 41, 255, 0, 173, 0, 255, 0, 245, 255, 71, 0, 255, 122, 0, 255, 0, 255, 184, 0, 92, 255, 184, 255, 0, 0, 133, 255, 255, 214, 0, 25, 194, 194, 102, 255, 0, 92, 0, 255]
citypallete = [
128,64,128,244,35,232,70,70,70,102,102,156,190,153,153,153,153,153,250,170,30,220,220,0,107,142,35,152,251,152,70,130,180,220,20,60,255,0,0,0,0,142,0,0,70,0,60,100,0,80,100,0,0,230,119,11,32,128,192,0,0,64,128,128,64,128,0,192,128,128,192,128,64,64,0,192,64,0,64,192,0,192,192,0,64,64,128,192,64,128,64,192,128,192,192,128,0,0,64,128,0,64,0,128,64,128,128,64,0,0,192,128,0,192,0,128,192,128,128,192,64,0,64,192,0,64,64,128,64,192,128,64,64,0,192,192,0,192,64,128,192,192,128,192,0,64,64,128,64,64,0,192,64,128,192,64,0,64,192,128,64,192,0,192,192,128,192,192,64,64,64,192,64,64,64,192,64,192,192,64,64,64,192,192,64,192,64,192,192,192,192,192,32,0,0,160,0,0,32,128,0,160,128,0,32,0,128,160,0,128,32,128,128,160,128,128,96,0,0,224,0,0,96,128,0,224,128,0,96,0,128,224,0,128,96,128,128,224,128,128,32,64,0,160,64,0,32,192,0,160,192,0,32,64,128,160,64,128,32,192,128,160,192,128,96,64,0,224,64,0,96,192,0,224,192,0,96,64,128,224,64,128,96,192,128,224,192,128,32,0,64,160,0,64,32,128,64,160,128,64,32,0,192,160,0,192,32,128,192,160,128,192,96,0,64,224,0,64,96,128,64,224,128,64,96,0,192,224,0,192,96,128,192,224,128,192,32,64,64,160,64,64,32,192,64,160,192,64,32,64,192,160,64,192,32,192,192,160,192,192,96,64,64,224,64,64,96,192,64,224,192,64,96,64,192,224,64,192,96,192,192,224,192,192,0,32,0,128,32,0,0,160,0,128,160,0,0,32,128,128,32,128,0,160,128,128,160,128,64,32,0,192,32,0,64,160,0,192,160,0,64,32,128,192,32,128,64,160,128,192,160,128,0,96,0,128,96,0,0,224,0,128,224,0,0,96,128,128,96,128,0,224,128,128,224,128,64,96,0,192,96,0,64,224,0,192,224,0,64,96,128,192,96,128,64,224,128,192,224,128,0,32,64,128,32,64,0,160,64,128,160,64,0,32,192,128,32,192,0,160,192,128,160,192,64,32,64,192,32,64,64,160,64,192,160,64,64,32,192,192,32,192,64,160,192,192,160,192,0,96,64,128,96,64,0,224,64,128,224,64,0,96,192,128,96,192,0,224,192,128,224,192,64,96,64,192,96,64,64,224,64,192,224,64,64,96,192,192,96,192,64,224,192,192,224,192,32,32,0,160,32,0,32,160,0,160,160,0,32,32,128,160,32,128,32,160,128,160,160,128,96,32,0,224,32,0,96,160,0,224,160,0,96,32,128,224,32,128,96,160,128,224,160,128,32,96,0,160,96,0,32,224,0,160,224,0,32,96,128,160,96,128,32,224,128,160,224,128,96,96,0,224,96,0,96,224,0,224,224,0,96,96,128,224,96,128,96,224,128,224,224,128,32,32,64,160,32,64,32,160,64,160,160,64,32,32,192,160,32,192,32,160,192,160,160,192,96,32,64,224,32,64,96,160,64,224,160,64,96,32,192,224,32,192,96,160,192,224,160,192,32,96,64,160,96,64,32,224,64,160,224,64,32,96,192,160,96,192,32,224,192,160,224,192,96,96,64,224,96,64,96,224,64,224,224,64,96,96,192,224,96,192,96,224,192,0,0,0]
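# Minimal usage sketch (illustrative, not part of the original module): convert a
# hypothetical (H, W) array of predicted class indices into a paletted PIL image.
if __name__ == '__main__':
    import numpy as np
    pred = np.zeros((240, 320), dtype='uint8')  # dummy prediction, every pixel is class 0
    get_mask_pallete(pred, dataset='ade20k').save('mask.png')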
|
py
|
1a56e78b4524814c982ef731b03543e9b2deb49c
|
'''Michael Lange <klappnase (at) freakmail (dot) de>
The ToolTip class provides a flexible tooltip widget for Tkinter; it is based on IDLE's ToolTip
module which unfortunately seems to be broken (at least the version I saw).
INITIALIZATION OPTIONS:
anchor : where the text should be positioned inside the widget, must be one of "n", "s", "e", "w", "nw" and so on;
default is "center"
bd : borderwidth of the widget; default is 1 (NOTE: don't use "borderwidth" here)
bg : background color to use for the widget; default is "lightyellow" (NOTE: don't use "background")
delay : time in ms that it takes for the widget to appear on the screen when the mouse pointer has
entered the parent widget; default is 1500
fg : foreground (i.e. text) color to use; default is "black" (NOTE: don't use "foreground")
follow_mouse : if set to 1 the tooltip will follow the mouse pointer instead of being displayed
outside of the parent widget; this may be useful if you want to use tooltips for
large widgets like listboxes or canvases; default is 0
font : font to use for the widget; default is system specific
justify : how multiple lines of text will be aligned, must be "left", "right" or "center"; default is "left"
padx : extra space added to the left and right within the widget; default is 4
pady : extra space above and below the text; default is 2
relief : one of "flat", "ridge", "groove", "raised", "sunken" or "solid"; default is "solid"
state : must be "normal" or "disabled"; if set to "disabled" the tooltip will not appear; default is "normal"
text : the text that is displayed inside the widget
textvariable : if set to an instance of Tkinter.StringVar() the variable's value will be used as text for the widget
width : width of the widget; the default is 0, which means that "wraplength" will be used to limit the widgets width
wraplength : limits the number of characters in each line; default is 150
WIDGET METHODS:
configure(**opts) : change one or more of the widget's options as described above; the changes will take effect the
next time the tooltip shows up; NOTE: follow_mouse cannot be changed after widget initialization
Other widget methods that might be useful if you want to subclass ToolTip:
enter() : callback when the mouse pointer enters the parent widget
leave() : called when the mouse pointer leaves the parent widget
motion() : is called when the mouse pointer moves inside the parent widget if follow_mouse is set to 1 and the
tooltip has shown up to continually update the coordinates of the tooltip window
coords() : calculates the screen coordinates of the tooltip window
create_contents() : creates the contents of the tooltip window (by default a Tkinter.Label)
'''
# Ideas gleaned from PySol
try:
import Tkinter as tkinter
except ImportError:
import tkinter
class ToolTip:
def __init__(self, master, text='Your text here', delay=1500, **opts):
self.master = master
self._opts = {'anchor':'center', 'bd':1, 'bg':'lightyellow', 'delay':delay, 'fg':'black',\
'follow_mouse':0, 'font':None, 'justify':'left', 'padx':4, 'pady':2,\
'relief':'solid', 'state':'normal', 'text':text, 'textvariable':None,\
'width':0, 'wraplength':150}
self.configure(**opts)
self._tipwindow = None
self._id = None
self._id1 = self.master.bind("<Enter>", self.enter, '+')
self._id2 = self.master.bind("<Leave>", self.leave, '+')
self._id3 = self.master.bind("<ButtonPress>", self.leave, '+')
self._follow_mouse = 0
if self._opts['follow_mouse']:
self._id4 = self.master.bind("<Motion>", self.motion, '+')
self._follow_mouse = 1
def configure(self, **opts):
for key in opts:
if key in self._opts:
self._opts[key] = opts[key]
            else:
                raise KeyError('Unknown option: "%s"' % key)
##----these methods handle the callbacks on "<Enter>", "<Leave>" and "<Motion>"---------------##
##----events on the parent widget; override them if you want to change the widget's behavior--##
def enter(self, event=None):
self._schedule()
def leave(self, event=None):
self._unschedule()
self._hide()
def motion(self, event=None):
if self._tipwindow and self._follow_mouse:
x, y = self.coords()
self._tipwindow.wm_geometry("+%d+%d" % (x, y))
##------the methods that do the work:---------------------------------------------------------##
def _schedule(self):
self._unschedule()
if self._opts['state'] == 'disabled':
return
self._id = self.master.after(self._opts['delay'], self._show)
def _unschedule(self):
id = self._id
self._id = None
if id:
self.master.after_cancel(id)
def _show(self):
if self._opts['state'] == 'disabled':
self._unschedule()
return
if not self._tipwindow:
self._tipwindow = tw = tkinter.Toplevel(self.master)
# hide the window until we know the geometry
tw.withdraw()
tw.wm_overrideredirect(1)
if tw.tk.call("tk", "windowingsystem") == 'aqua':
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "none")
self.create_contents()
tw.update_idletasks()
x, y = self.coords()
tw.wm_geometry("+%d+%d" % (x, y))
tw.deiconify()
def _hide(self):
tw = self._tipwindow
self._tipwindow = None
if tw:
tw.destroy()
##----these methods might be overridden in derived classes:----------------------------------##
def coords(self):
# The tip window must be completely outside the master widget;
# otherwise when the mouse enters the tip window we get
# a leave event and it disappears, and then we get an enter
# event and it reappears, and so on forever :-(
# or we take care that the mouse pointer is always outside the tipwindow :-)
tw = self._tipwindow
twx, twy = tw.winfo_reqwidth(), tw.winfo_reqheight()
w, h = tw.winfo_screenwidth(), tw.winfo_screenheight()
# calculate the y coordinate:
if self._follow_mouse:
y = tw.winfo_pointery() + 20
# make sure the tipwindow is never outside the screen:
if y + twy > h:
y = y - twy - 30
else:
y = self.master.winfo_rooty() + self.master.winfo_height() + 3
if y + twy > h:
y = self.master.winfo_rooty() - twy - 3
# we can use the same x coord in both cases:
x = tw.winfo_pointerx() - twx / 2
if x < 0:
x = 0
elif x + twx > w:
x = w - twx
return x, y
def create_contents(self):
opts = self._opts.copy()
for opt in ('delay', 'follow_mouse', 'state'):
del opts[opt]
label = tkinter.Label(self._tipwindow, **opts)
label.pack()
##---------demo code-----------------------------------##
def demo():
root = tkinter.Tk(className='ToolTip-demo')
l = tkinter.Listbox(root)
l.insert('end', "I'm a listbox")
l.pack(side='top')
t1 = ToolTip(l, follow_mouse=1, text="I'm a tooltip with follow_mouse set to 1, so I won't be placed outside my parent")
b = tkinter.Button(root, text='Quit', command=root.quit)
b.pack(side='bottom')
t2 = ToolTip(b, text='Enough of this')
root.mainloop()
if __name__ == '__main__':
demo()
|
py
|
1a56e7ed8d52059d680c2cee52793368a0e2c54f
|
import json
from collections import Counter
class Sorter(object):
def __init__(self):
        # Target playlists to edit; read from config.ini and can be changed there
        from configparser import ConfigParser
        config = ConfigParser()
        config.read('config.ini')
        target_playlists = config.get('Settings', 'TargetPlaylists')
        self.target_playlists = [x.strip() for x in target_playlists.split(',')]
def findtargetPlaylists(self, playlist_json, length_check=False):
found_playlists = {}
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
playlist_names = self.findPlaylists(playlist_json)
for i, playlist in enumerate(playlist_names):
if playlist in self.target_playlists: found_playlists[playlist] = playlist_json['items'][i]['id']
if(length_check):
if len(playlist_json['items']) == 20: extension_state=True
else: extension_state=False
return(found_playlists, extension_state)
return(found_playlists)
def findPlaylists(self, playlist_json):
found_playlists = []
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
for item in playlist_json['items']: found_playlists.append(f"{item['name']}")
return(found_playlists)
def findSongs(self, recent_json):
recent_songs = []
if(isinstance(recent_json, (str, bytes))): recent_json = json.loads(recent_json)
for item in recent_json['items']: recent_songs.append(f"{item['track']['name']}:{item['track']['id']}")
return(recent_songs)
def findtopSongs(self, songs_list):
listened_songs = []
for vector in songs_list: listened_songs.append(vector)
song_counter = Counter(listened_songs)
top_songs = sorted(song_counter, key=lambda x: -song_counter[x])
songs_counted = song_counter.most_common()
return(songs_counted)
def findtimeAdded(self, playlist_json):
if(isinstance(playlist_json, (str, bytes))): playlist_json = json.loads(playlist_json)
elif(isinstance(playlist_json, dict)): pass
else: return
timestamp_list = [item['added_at'] for item in playlist_json['items']]
return(timestamp_list)
def compare_recent(self, recent_json, new_json):
new_songs = self.findSongs(new_json)
recent_songs = self.findSongs(recent_json)
return not(new_songs == recent_songs)
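# Minimal usage sketch (illustrative, not part of the original module; assumes a
# config.ini with a [Settings] section holding a comma-separated TargetPlaylists
# entry, and raw JSON responses from the Spotify Web API):
#   sorter = Sorter()
#   targets = sorter.findtargetPlaylists(playlists_json)
#   top = sorter.findtopSongs(sorter.findSongs(recent_json))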
|
py
|
1a56e881b8bc5ffe03c8c648d023eff59c9af3b2
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import JdCoinTestFramework
from test_framework.util import (
assert_equal,
wait_until,
connect_nodes,
)
class NotificationsTest(JdCoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [["-blockversion=4",
"-alertnotify=echo %%s >> %s" % self.alert_filename,
"-blocknotify=echo %%s >> %s" % self.block_filename],
["-blockversion=211",
"-rescan",
"-walletnotify=echo %%s >> %s" % self.tx_filename]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generate(block_count)
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
with open(self.block_filename, 'r') as f:
assert_equal(sorted(blocks), sorted(f.read().splitlines()))
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
os.remove(self.tx_filename)
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.restart_node(1)
connect_nodes(self.nodes[0], 1)
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        # Mine another 51 up-version blocks; -alertnotify should trigger once more than 50 unknown-version blocks have been seen.
self.log.info("test -alertnotify")
self.nodes[1].generate(51)
self.sync_all()
# Give jdcoind 10 seconds to write the alert notification
wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(2)
self.sync_all()
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text2 = f.read()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(alert_text, alert_text2)
if __name__ == '__main__':
NotificationsTest().main()
|
py
|
1a56e8b235e8ffd6f66024fa8bac3824427df911
|
import os
def rename_files():
# (1) get file names from a folder
file_list = os.listdir("prank")
print(file_list)
saved_path = os.getcwd()
print("Current Working Directory is %s" % saved_path)
os.chdir("prank")
# (2) for each file, rename filename
for file_name in file_list:
        os.rename(file_name, file_name.translate(str.maketrans('', '', "0123456789")))
os.chdir(saved_path)
rename_files()
# os.rename("test", "test3")
# os.rename("test1", "test2")
|
py
|
1a56e9195c9dd453e45598166fcb63ed508846f3
|
from pymongo import MongoClient
client = MongoClient("mongodb://127.0.0.1:27017/")
database = "mydb"
collections = ["cars", "cops"]
database = client[database]
cars = database[collections[0]]
cops = database[collections[1]]
def get_all_cars():
return cars.find()
def get_free_cars():
return cars.find({"resolved": 0, "assigned": 0})
def put_car(car):
car['cop_id'] = ""
car['resolved'] = 0
car['assigned'] = 0
car_id = cars.insert_one(car).inserted_id
return car_id
def get_all_cops():
return cops.find()
def get_available_cops():
return cops.find({"available": 1})
def put_cop(cop):
cop['available'] = 1
cop['car_id'] = ""
cop_id = cops.insert_one(cop).inserted_id
return cop_id
def assign_cop_car(cop_id, car_id):
if list(cops.find_one({"_id": cop_id}, {"_id": 0, "available": 1}).values()) != [1]: return "failed"
if list(cars.find_one({"_id": car_id}, {"_id": 0, "resolved": 1, "assigned": 1}).values()) != [0, 0]: return "failed"
cops.update_one({"_id": cop_id}, {"$set": {"available": 0, "car_id": car_id}})
cars.update_one({"_id": car_id}, {"$set": {"assigned": 1, "cop_id": cop_id}})
return "assigned"
def complete_assignment(cop_id, car_id):
if list(cops.find_one({"_id": cop_id}, {"_id": 0, "available": 1}).values()) != [0]: return "failed"
if list(cars.find_one({"_id": car_id}, {"_id": 0, "resolved": 1, "assigned": 1}).values()) != [0, 1]: return "failed"
cops.update_one({"_id": cop_id}, {"$set": {"available": 1}})
cops.update_one({"_id": cop_id}, {"$set": {"car_id": ""}})
cars.update_one({"_id": car_id}, {"$set": {"assigned": 0}})
cars.update_one({"_id": car_id}, {"$set": {"resolved": 1}})
cars.update_one({"_id": car_id}, {"$set": {"cop_id": ""}})
return "resolved"
|
py
|
1a56ebaa676367536cb856879110bf3bbc96449c
|
"""Utilities related archives.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import shutil
import stat
import tarfile
import zipfile
from pip._internal.exceptions import InstallationError
from pip._internal.utils.filetypes import (
BZ2_EXTENSIONS,
TAR_EXTENSIONS,
XZ_EXTENSIONS,
ZIP_EXTENSIONS,
)
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterable, List, Optional, Text, Union
logger = logging.getLogger(__name__)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug("bz2 module is not available")
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug("lzma module is not available")
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def split_leading_dir(path):
# type: (Union[str, Text]) -> List[Union[str, Text]]
path = path.lstrip("/").lstrip("\\")
if "/" in path and (
("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
):
return path.split("/", 1)
elif "\\" in path:
return path.split("\\", 1)
else:
return [path, ""]
def has_leading_dir(paths):
# type: (Iterable[Union[str, Text]]) -> bool
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
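# Illustrative examples (not part of the original module):
#   has_leading_dir(['pkg/__init__.py', 'pkg/mod.py']) -> True
#   has_leading_dir(['setup.py', 'pkg/mod.py'])        -> False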
def is_within_directory(directory, target):
# type: ((Union[str, Text]), (Union[str, Text])) -> bool
"""
Return true if the absolute path of target is within the directory
"""
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, "rb")
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if not is_within_directory(location, fn):
message = (
"The zip file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, fn, location))
if fn.endswith("/") or fn.endswith("\\"):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
# Don't use read() to avoid allocating an arbitrarily large
# chunk of memory for the file's content
fp = zip.open(name)
try:
with open(fn, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
# type: (str, str) -> None
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
mode = "r:gz"
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = "r:bz2"
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = "r:xz"
elif filename.lower().endswith(".tar"):
mode = "r"
else:
logger.warning(
"Cannot determine compression type for file %s", filename,
)
mode = "r:*"
tar = tarfile.open(filename, mode)
try:
leading = has_leading_dir([member.name for member in tar.getmembers()])
for member in tar.getmembers():
fn = member.name
if leading:
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
if not is_within_directory(location, path):
message = (
"The tar file ({}) has a file ({}) trying to install "
"outside target directory ({})"
)
raise InstallationError(message.format(filename, path, location))
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
# https://github.com/python/typeshed/issues/2673
tar._extract_member(member, path) # type: ignore
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
"In the tar file %s the member %s is invalid: %s",
filename,
member.name,
exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, "wb") as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
# https://github.com/python/typeshed/issues/2673
tar.utime(member, path) # type: ignore
                # does the member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(
filename, # type: str
location, # type: str
content_type=None, # type: Optional[str]
):
# type: (...) -> None
filename = os.path.realpath(filename)
if (
content_type == "application/zip"
or filename.lower().endswith(ZIP_EXTENSIONS)
or zipfile.is_zipfile(filename)
):
unzip_file(filename, location, flatten=not filename.endswith(".whl"))
elif (
content_type == "application/x-gzip"
or tarfile.is_tarfile(filename)
or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
):
untar_file(filename, location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
"Cannot unpack file %s (downloaded from %s, content-type: %s); "
"cannot detect archive format",
filename,
location,
content_type,
)
raise InstallationError(
"Cannot determine archive format of {}".format(location)
)
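# Illustrative examples (not part of the original module):
#   unpack_file('pkg-1.0.tar.gz', '/tmp/pkg')            # handled by untar_file
#   unpack_file('pkg-1.0-py3-none-any.whl', '/tmp/pkg')  # zip path; flatten is disabled for wheels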
|
py
|
1a56ebd1f1e5c924f4155b1018d4a414bbb1d4ec
|
# -*- coding: utf-8 -*-
""" Deeplabv3+ model for Keras.
This model is based on TF repo:
https://github.com/tensorflow/models/tree/master/research/deeplab
On Pascal VOC, original model gets to 84.56% mIOU
Now this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers, but Theano will add
this layer soon.
MobileNetv2 backbone is based on this repo:
https://github.com/JonathanCMitchell/mobilenet_v2_keras
# Reference
- [Encoder-Decoder with Atrous Separable Convolution
for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
- [Xception: Deep Learning with Depthwise Separable Convolutions]
(https://arxiv.org/abs/1610.02357)
- [Inverted Residuals and Linear Bottlenecks: Mobile Networks for
Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from keras.models import Model
from keras import layers
from keras.layers import Input
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import Add
from keras.layers import Dropout
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import DepthwiseConv2D
from keras.layers import ZeroPadding2D
from keras.layers import AveragePooling2D
from keras.engine import Layer
from keras.engine import InputSpec
from keras.engine.topology import get_source_inputs
from keras import backend as K
from keras.applications import imagenet_utils
from keras.utils import conv_utils
from keras.utils.data_utils import get_file
WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
class BilinearUpsampling(Layer):
"""Just a simple bilinear upsampling layer. Works only with TF.
Args:
upsampling: tuple of 2 numbers > 0. The upsampling ratio for h and w
output_size: used instead of upsampling arg if passed!
"""
def __init__(self, upsampling=(2, 2), output_size=None, data_format=None, **kwargs):
super(BilinearUpsampling, self).__init__(**kwargs)
self.data_format = K.normalize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
if output_size:
self.output_size = conv_utils.normalize_tuple(
output_size, 2, 'output_size')
self.upsampling = None
else:
self.output_size = None
self.upsampling = conv_utils.normalize_tuple(
upsampling, 2, 'upsampling')
def compute_output_shape(self, input_shape):
if self.upsampling:
height = self.upsampling[0] * \
input_shape[1] if input_shape[1] is not None else None
width = self.upsampling[1] * \
input_shape[2] if input_shape[2] is not None else None
else:
height = self.output_size[0]
width = self.output_size[1]
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
if self.upsampling:
return K.tf.image.resize_bilinear(inputs, (inputs.shape[1] * self.upsampling[0],
inputs.shape[2] * self.upsampling[1]),
align_corners=True)
else:
return K.tf.image.resize_bilinear(inputs, (self.output_size[0],
self.output_size[1]),
align_corners=True)
def get_config(self):
config = {'upsampling': self.upsampling,
'output_size': self.output_size,
'data_format': self.data_format}
base_config = super(BilinearUpsampling, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
""" SepConv with BN between depthwise & pointwise. Optionally add activation after BN
Implements right "same" padding for even kernel sizes
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
            depth_activation: flag to use activation between depthwise & pointwise convs
epsilon: epsilon to use in BN layer
"""
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
if not depth_activation:
x = Activation('relu')(x)
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
return x
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
"""Implements right 'same' padding for even kernel sizes
Without this there is a 1 pixel drift when stride = 2
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
"""
if stride == 1:
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='same', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='valid', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
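# Worked example (sketch): with kernel_size=3, rate=2 and stride=2 the effective
# kernel is 3 + (3 - 1) * (2 - 1) = 5, so pad_total = 4 and the input is padded
# by (2, 2) before a 'valid' convolution -- this keeps strided outputs aligned
# (no 1-pixel drift) compared to plain 'same' padding.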
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
rate=1, depth_activation=False, return_skip=False):
""" Basic building block of modified Xception network
Args:
inputs: input tensor
depth_list: number of filters in each SepConv layer. len(depth_list) == 3
prefix: prefix before name
skip_connection_type: one of {'conv','sum','none'}
stride: stride at last depthwise conv
rate: atrous rate for depthwise convolution
depth_activation: flag to use activation between depthwise & pointwise convs
return_skip: flag to return additional tensor after 2 SepConvs for decoder
"""
residual = inputs
for i in range(3):
residual = SepConv_BN(residual,
depth_list[i],
prefix + '_separable_conv{}'.format(i + 1),
stride=stride if i == 2 else 1,
rate=rate,
depth_activation=depth_activation)
if i == 1:
skip = residual
if skip_connection_type == 'conv':
shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
kernel_size=1,
stride=stride)
shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
outputs = layers.add([residual, shortcut])
elif skip_connection_type == 'sum':
outputs = layers.add([residual, inputs])
elif skip_connection_type == 'none':
outputs = residual
if return_skip:
return outputs, skip
else:
return outputs
def relu6(x):
return K.relu(x, max_value=6)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
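# Worked example (sketch): with alpha = 0.35 the first block requests
# 32 * 0.35 = 11.2 filters; _make_divisible(11.2, 8) rounds to the nearest
# multiple of 8 (here 8), but since 8 < 0.9 * 11.2 one extra divisor is added,
# so 16 filters are used.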
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
in_channels = inputs._keras_shape[-1]
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'expanded_conv_{}_'.format(block_id)
if block_id:
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'expand_BN')(x)
x = Activation(relu6, name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same', dilation_rate=(rate, rate),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = Activation(relu6, name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'project_BN')(x)
if skip_connection:
return Add(name=prefix + 'add')([inputs, x])
# if in_channels == pointwise_filters and stride == 1:
# return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2', OS=16, alpha=1.):
""" Instantiates the Deeplabv3+ architecture
Optionally loads weights pre-trained
on PASCAL VOC. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(height, width, channels)`.
# Arguments
weights: one of 'pascal_voc' (pre-trained on pascal voc)
or None (random initialization)
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: shape of input image. format HxWxC
PASCAL VOC model was trained on (512,512,3) images
classes: number of desired classes. If classes != 21,
last layer is initialized randomly
backbone: backbone to use. one of {'xception','mobilenetv2'}
OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.
Used only for xception backbone.
alpha: controls the width of the MobileNetV2 network. This is known as the
width multiplier in the MobileNetV2 paper.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
Used only for mobilenetv2 backbone
# Returns
A Keras model instance.
# Raises
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
ValueError: in case of invalid argument for `weights` or `backbone`
"""
if not (weights in {'pascal_voc', None}):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `pascal_voc` '
'(pre-trained on PASCAL VOC)')
if K.backend() != 'tensorflow':
raise RuntimeError('The Deeplabv3+ model is only available with '
'the TensorFlow backend.')
if not (backbone in {'xception', 'mobilenetv2'}):
raise ValueError('The `backbone` argument should be either '
'`xception` or `mobilenetv2` ')
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backbone == 'xception':
if OS == 8:
entry_block3_stride = 1
middle_block_rate = 2 # ! Not mentioned in paper, but required
exit_block_rates = (2, 4)
atrous_rates = (12, 24, 36)
else:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
atrous_rates = (6, 12, 18)
x = Conv2D(32, (3, 3), strides=(2, 2),
name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
x = Activation('relu')(x)
x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
x = Activation('relu')(x)
x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
skip_connection_type='conv', stride=2,
depth_activation=False)
x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
skip_connection_type='conv', stride=2,
depth_activation=False, return_skip=True)
x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
skip_connection_type='conv', stride=entry_block3_stride,
depth_activation=False)
for i in range(16):
x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
skip_connection_type='sum', stride=1, rate=middle_block_rate,
depth_activation=False)
x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
depth_activation=False)
x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
skip_connection_type='none', stride=1, rate=exit_block_rates[1],
depth_activation=True)
else:
OS = 8
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same',
use_bias=False, name='Conv')(img_input)
x = BatchNormalization(
epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
x = Activation(relu6, name='Conv_Relu6')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3, skip_connection=False)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5, skip_connection=True)
# stride in block 6 changed from 2 -> 1, so we need to use rate = 2
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!
expansion=6, block_id=6, skip_connection=False)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=7, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=8, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=9, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=10, skip_connection=False)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=11, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=12, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!
expansion=6, block_id=13, skip_connection=False)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=14, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=15, skip_connection=True)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=16, skip_connection=False)
# end of feature extractor
# branching for Atrous Spatial Pyramid Pooling
# Image Feature branch
#out_shape = int(np.ceil(input_shape[0] / OS))
b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(x)
b4 = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='image_pooling')(b4)
b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
b4 = Activation('relu')(b4)
b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)), int(np.ceil(input_shape[1] / OS))))(b4)
# simple 1x1
b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
b0 = Activation('relu', name='aspp0_activation')(b0)
# only two branches (image pooling and the simple 1x1 conv) are used for the
# mobilenetv2 backbone; the atrous SepConv branches below are xception-only
if backbone == 'xception':
# rate = 6 (12)
b1 = SepConv_BN(x, 256, 'aspp1',
rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
# rate = 12 (24)
b2 = SepConv_BN(x, 256, 'aspp2',
rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
# rate = 18 (36)
b3 = SepConv_BN(x, 256, 'aspp3',
rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
# concatenate ASPP branches & project
x = Concatenate()([b4, b0, b1, b2, b3])
else:
x = Concatenate()([b4, b0])
x = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='concat_projection')(x)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
# DeepLab v.3+ decoder
if backbone == 'xception':
# Feature projection
# x4 (x2) block
x = BilinearUpsampling(output_size=(int(np.ceil(input_shape[0] / 4)),
int(np.ceil(input_shape[1] / 4))))(x)
dec_skip1 = Conv2D(48, (1, 1), padding='same',
use_bias=False, name='feature_projection0')(skip1)
dec_skip1 = BatchNormalization(
name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
dec_skip1 = Activation('relu')(dec_skip1)
x = Concatenate()([x, dec_skip1])
x = SepConv_BN(x, 256, 'decoder_conv0',
depth_activation=True, epsilon=1e-5)
x = SepConv_BN(x, 256, 'decoder_conv1',
depth_activation=True, epsilon=1e-5)
# you can use it with an arbitrary number of classes
if classes == 21:
last_layer_name = 'logits_semantic'
else:
last_layer_name = 'custom_logits_semantic'
x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)
x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='deeplabv3plus')
# load weights
if weights == 'pascal_voc':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_X,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_MOBILE,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
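# Usage sketch (illustrative only): build an untrained MobileNetV2-backbone model
# for a custom 3-class task on 256x256 RGB inputs; weights=None avoids any download.
def _example_build_deeplab():
    return Deeplabv3(weights=None, input_shape=(256, 256, 3),
                     classes=3, backbone='mobilenetv2', alpha=1.)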
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Input array scaled to [-1.,1.]
"""
return imagenet_utils.preprocess_input(x, mode='tf')
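# Inference sketch (assumes `image` is a (512, 512, 3) float array and that the
# pretrained PASCAL VOC weights can be downloaded):
def _example_segment(image):
    batch = preprocess_input(np.expand_dims(image, axis=0))
    logits = Deeplabv3(weights='pascal_voc', backbone='mobilenetv2').predict(batch)
    return np.argmax(logits[0], axis=-1)  # per-pixel class labels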
|
py
|
1a56ec7ccca5dd7409052e32a4fc5efdae1d0ff3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class UnbindVbrRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'UnbindVbr','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_VbrId(self): # String
return self.get_query_params().get('VbrId')
def set_VbrId(self, VbrId): # String
self.add_query_param('VbrId', VbrId)
def get_VbrRegionId(self): # String
return self.get_query_params().get('VbrRegionId')
def set_VbrRegionId(self, VbrRegionId): # String
self.add_query_param('VbrRegionId', VbrRegionId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_SmartAGUid(self): # Long
return self.get_query_params().get('SmartAGUid')
def set_SmartAGUid(self, SmartAGUid): # Long
self.add_query_param('SmartAGUid', SmartAGUid)
def get_SmartAGId(self): # String
return self.get_query_params().get('SmartAGId')
def set_SmartAGId(self, SmartAGId): # String
self.add_query_param('SmartAGId', SmartAGId)
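# Usage sketch (standard aliyunsdkcore client pattern; credentials, region and IDs
# below are placeholders):
#
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
# request = UnbindVbrRequest()
# request.set_SmartAGId('sag-xxxxxxxx')
# request.set_VbrId('vbr-xxxxxxxx')
# request.set_VbrRegionId('cn-shanghai')
# response = client.do_action_with_exception(request)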
|
py
|
1a56ec97112a5f3b666ed13f0a94c3d28c786f2a
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
SHOULD include dedicated exception logging.
"""
import collections
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from ironic.common.i18n import _
from ironic.conf import CONF
LOG = logging.getLogger(__name__)
def _ensure_exception_kwargs_serializable(exc_class_name, kwargs):
"""Ensure that kwargs are serializable
Ensure that all kwargs passed to exception constructor can be passed over
RPC, by trying to convert them to JSON, or, as a last resort, to string.
If it is not possible, unserializable kwargs will be removed, letting the
receiver handle the exception string as it is configured to.
:param exc_class_name: an IronicException class name.
:param kwargs: a dictionary of keyword arguments passed to the exception
constructor.
:returns: a dictionary of serializable keyword arguments.
"""
serializers = [(jsonutils.dumps, _('when converting to JSON')),
(six.text_type, _('when converting to string'))]
exceptions = collections.defaultdict(list)
serializable_kwargs = {}
for k, v in kwargs.items():
for serializer, msg in serializers:
try:
serializable_kwargs[k] = serializer(v)
exceptions.pop(k, None)
break
except Exception as e:
exceptions[k].append(
'(%(serializer_type)s) %(e_type)s: %(e_contents)s' %
{'serializer_type': msg, 'e_contents': e,
'e_type': e.__class__.__name__})
if exceptions:
LOG.error("One or more arguments passed to the %(exc_class)s "
"constructor as kwargs can not be serialized. The "
"serialized arguments: %(serialized)s. These "
"unserialized kwargs were dropped because of the "
"exceptions encountered during their "
"serialization:\n%(errors)s",
dict(errors=';\n'.join("%s: %s" % (k, '; '.join(v))
for k, v in exceptions.items()),
exc_class=exc_class_name,
serialized=serializable_kwargs))
# We might be able to actually put the following keys' values into the
# format string, but there is no guarantee, so drop them just in case.
for k in exceptions:
del kwargs[k]
return serializable_kwargs
class IronicException(Exception):
"""Base Ironic Exception
To correctly use this class, inherit from it and define
a '_msg_fmt' property. That message will get printf'd
with the keyword arguments provided to the constructor.
If you need to access the message from an exception you should use
six.text_type(exc)
"""
_msg_fmt = _("An unknown exception occurred.")
code = http_client.INTERNAL_SERVER_ERROR
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = _ensure_exception_kwargs_serializable(
self.__class__.__name__, kwargs)
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
else:
self.code = int(kwargs['code'])
if not message:
try:
message = self._msg_fmt % kwargs
except Exception as e:
# kwargs doesn't match a variable in self._msg_fmt
# log the issue and the kwargs
prs = ', '.join('%s: %s' % pair for pair in kwargs.items())
LOG.exception('Exception in string format operation '
'(arguments %s)', prs)
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core self._msg_fmt out if something
# happened
message = self._msg_fmt
super(IronicException, self).__init__(message)
def __str__(self):
"""Encode to utf-8 then wsme api can consume it as well."""
if not six.PY3:
return six.text_type(self.args[0]).encode('utf-8')
return self.args[0]
def __unicode__(self):
"""Return a unicode representation of the exception message."""
return six.text_type(self.args[0])
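# Usage sketch: the subclasses below only override `_msg_fmt` (and optionally
# `code`); keyword arguments supplied at raise time fill the format string in the
# base constructor. For example, `raise NodeNotFound(node=uuid)` renders
# "Node <uuid> could not be found." with an HTTP 404 code, and unserializable
# kwargs are dropped (and logged) so the exception can be sent over RPC.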
class NotAuthorized(IronicException):
_msg_fmt = _("Not authorized.")
code = http_client.FORBIDDEN
class OperationNotPermitted(NotAuthorized):
_msg_fmt = _("Operation not permitted.")
class Invalid(IronicException):
_msg_fmt = _("Unacceptable parameters.")
code = http_client.BAD_REQUEST
class Conflict(IronicException):
_msg_fmt = _('Conflict.')
code = http_client.CONFLICT
class TemporaryFailure(IronicException):
_msg_fmt = _("Resource temporarily unavailable, please retry.")
code = http_client.SERVICE_UNAVAILABLE
class NotAcceptable(IronicException):
# TODO(deva): We need to set response headers in the API for this exception
_msg_fmt = _("Request not acceptable.")
code = http_client.NOT_ACCEPTABLE
class InvalidState(Conflict):
_msg_fmt = _("Invalid resource state.")
class NodeAlreadyExists(Conflict):
_msg_fmt = _("A node with UUID %(uuid)s already exists.")
class MACAlreadyExists(Conflict):
_msg_fmt = _("A port with MAC address %(mac)s already exists.")
class ChassisAlreadyExists(Conflict):
_msg_fmt = _("A chassis with UUID %(uuid)s already exists.")
class PortAlreadyExists(Conflict):
_msg_fmt = _("A port with UUID %(uuid)s already exists.")
class PortgroupAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with UUID %(uuid)s already exists.")
class PortgroupDuplicateName(Conflict):
_msg_fmt = _("A portgroup with name %(name)s already exists.")
class PortgroupMACAlreadyExists(Conflict):
_msg_fmt = _("A portgroup with MAC address %(mac)s already exists.")
class InstanceAssociated(Conflict):
_msg_fmt = _("Instance %(instance_uuid)s is already associated with a "
"node, it cannot be associated with this other node %(node)s")
class DuplicateName(Conflict):
_msg_fmt = _("A node with name %(name)s already exists.")
class VolumeConnectorAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with UUID %(uuid)s already exists.")
class VolumeConnectorTypeAndIdAlreadyExists(Conflict):
_msg_fmt = _("A volume connector with type %(type)s and connector ID "
"%(connector_id)s already exists.")
class VolumeTargetAlreadyExists(Conflict):
_msg_fmt = _("A volume target with UUID %(uuid)s already exists.")
class VolumeTargetBootIndexAlreadyExists(Conflict):
_msg_fmt = _("A volume target with boot index '%(boot_index)s' "
"for the same node already exists.")
class VifAlreadyAttached(Conflict):
_msg_fmt = _("Unable to attach VIF because VIF %(vif)s is already "
"attached to Ironic %(object_type)s %(object_uuid)s")
class NoFreePhysicalPorts(Invalid):
_msg_fmt = _("Unable to attach VIF %(vif)s, not "
"enough free physical ports.")
class VifNotAttached(Invalid):
_msg_fmt = _("Unable to detach VIF %(vif)s from node %(node)s "
"because it is not attached to it.")
class InvalidUUID(Invalid):
_msg_fmt = _("Expected a UUID but received %(uuid)s.")
class InvalidUuidOrName(Invalid):
_msg_fmt = _("Expected a logical name or UUID but received %(name)s.")
class InvalidName(Invalid):
_msg_fmt = _("Expected a logical name but received %(name)s.")
class InvalidConductorGroup(Invalid):
_msg_fmt = _("Expected a conductor group but received %(group)s.")
class InvalidIdentity(Invalid):
_msg_fmt = _("Expected a UUID or int but received %(identity)s.")
class InvalidMAC(Invalid):
_msg_fmt = _("Expected a MAC address but received %(mac)s.")
class InvalidSwitchID(Invalid):
_msg_fmt = _("Expected a MAC address or OpenFlow datapath ID but "
"received %(switch_id)s.")
class InvalidDatapathID(Invalid):
_msg_fmt = _("Expected an OpenFlow datapath ID but received "
"%(datapath_id)s.")
class InvalidStateRequested(Invalid):
_msg_fmt = _('The requested action "%(action)s" can not be performed '
'on node "%(node)s" while it is in state "%(state)s".')
class PatchError(Invalid):
_msg_fmt = _("Couldn't apply patch '%(patch)s'. Reason: %(reason)s")
class InstanceDeployFailure(IronicException):
_msg_fmt = _("Failed to deploy instance: %(reason)s")
class ImageUnacceptable(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class ImageConvertFailed(IronicException):
_msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
_msg_fmt = "%(err)s"
class MissingParameterValue(InvalidParameterValue):
_msg_fmt = "%(err)s"
class Duplicate(IronicException):
_msg_fmt = _("Resource already exists.")
class NotFound(IronicException):
_msg_fmt = _("Resource could not be found.")
code = http_client.NOT_FOUND
class DHCPLoadError(IronicException):
_msg_fmt = _("Failed to load DHCP provider %(dhcp_provider_name)s, "
"reason: %(reason)s")
# TODO(dtantsur): word "driver" is overused in class names here, and generally
# means stevedore driver, not ironic driver. Rename them in the future.
class DriverNotFound(NotFound):
_msg_fmt = _("Could not find the following driver(s) or hardware type(s): "
"%(driver_name)s.")
class DriverNotFoundInEntrypoint(DriverNotFound):
_msg_fmt = _("Could not find the following items in the "
"'%(entrypoint)s' entrypoint: %(names)s.")
class InterfaceNotFoundInEntrypoint(InvalidParameterValue):
_msg_fmt = _("Could not find the following interface in the "
"'%(entrypoint)s' entrypoint: %(iface)s. Valid interfaces "
"are %(valid)s.")
class IncompatibleInterface(InvalidParameterValue):
_msg_fmt = _("%(interface_type)s interface implementation "
"'%(interface_impl)s' is not supported by hardware type "
"%(hardware_type)s.")
class NoValidDefaultForInterface(InvalidParameterValue):
# NOTE(rloo): in the line below, there is no blank space after 'For'
# because node_info could be an empty string. If node_info
# is not empty, it should start with a space.
_msg_fmt = _("For%(node_info)s hardware type '%(driver)s', no default "
"value found for %(interface_type)s interface.")
class ImageNotFound(NotFound):
_msg_fmt = _("Image %(image_id)s could not be found.")
class NoValidHost(NotFound):
_msg_fmt = _("No valid host was found. Reason: %(reason)s")
class InstanceNotFound(NotFound):
_msg_fmt = _("Instance %(instance)s could not be found.")
class InputFileError(IronicException):
_msg_fmt = _("Error with file %(file_name)s. Reason: %(reason)s")
class NodeNotFound(NotFound):
_msg_fmt = _("Node %(node)s could not be found.")
class PortgroupNotFound(NotFound):
_msg_fmt = _("Portgroup %(portgroup)s could not be found.")
class PortgroupNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because portgroup "
"%(portgroup)s contains ports.")
class NodeAssociated(InvalidState):
_msg_fmt = _("Node %(node)s is associated with instance %(instance)s.")
class PortNotFound(NotFound):
_msg_fmt = _("Port %(port)s could not be found.")
class FailedToUpdateDHCPOptOnPort(IronicException):
_msg_fmt = _("Update DHCP options on port: %(port_id)s failed.")
class FailedToCleanDHCPOpts(IronicException):
_msg_fmt = _("Clean up DHCP options on node: %(node)s failed.")
class FailedToGetIPAddressOnPort(IronicException):
_msg_fmt = _("Retrieve IP address on port: %(port_id)s failed.")
class InvalidIPv4Address(IronicException):
_msg_fmt = _("Invalid IPv4 address %(ip_address)s.")
class FailedToUpdateMacOnPort(IronicException):
_msg_fmt = _("Update MAC address on port: %(port_id)s failed.")
class ChassisNotFound(NotFound):
_msg_fmt = _("Chassis %(chassis)s could not be found.")
class VolumeConnectorNotFound(NotFound):
_msg_fmt = _("Volume connector %(connector)s could not be found.")
class VolumeTargetNotFound(NotFound):
_msg_fmt = _("Volume target %(target)s could not be found.")
class NoDriversLoaded(IronicException):
_msg_fmt = _("Conductor %(conductor)s cannot be started "
"because no hardware types were loaded.")
class ConductorNotFound(NotFound):
_msg_fmt = _("Conductor %(conductor)s could not be found.")
class ConductorAlreadyRegistered(IronicException):
_msg_fmt = _("Conductor %(conductor)s already registered.")
class ConductorHardwareInterfacesAlreadyRegistered(IronicException):
_msg_fmt = _("At least one of these (hardware type %(hardware_type)s, "
"interface type %(interface_type)s, interfaces "
"%(interfaces)s) combinations are already registered for "
"this conductor.")
class PowerStateFailure(InvalidState):
_msg_fmt = _("Failed to set node power state to %(pstate)s.")
class ExclusiveLockRequired(NotAuthorized):
_msg_fmt = _("An exclusive lock is required, "
"but the current context has a shared lock.")
class NodeMaintenanceFailure(Invalid):
_msg_fmt = _("Failed to toggle maintenance-mode flag "
"for node %(node)s: %(reason)s")
class NodeConsoleNotEnabled(Invalid):
_msg_fmt = _("Console access is not enabled on node %(node)s")
class NodeInMaintenance(Invalid):
_msg_fmt = _("The %(op)s operation can't be performed on node "
"%(node)s because it's in maintenance mode.")
class ChassisNotEmpty(Invalid):
_msg_fmt = _("Cannot complete the requested action because chassis "
"%(chassis)s contains nodes.")
class IPMIFailure(IronicException):
_msg_fmt = _("IPMI call failed: %(cmd)s.")
class UnsupportedDriverExtension(Invalid):
_msg_fmt = _('Driver %(driver)s does not support %(extension)s '
'(disabled or not implemented).')
class GlanceConnectionFailed(IronicException):
_msg_fmt = _("Connection to glance endpoint %(endpoint)s failed: "
"%(reason)s")
class ImageNotAuthorized(NotAuthorized):
_msg_fmt = _("Not authorized for image %(image_id)s.")
class InvalidImageRef(Invalid):
_msg_fmt = _("Invalid image href %(image_href)s.")
class ImageRefValidationFailed(IronicException):
_msg_fmt = _("Validation of image href %(image_href)s failed, "
"reason: %(reason)s")
class ImageDownloadFailed(IronicException):
_msg_fmt = _("Failed to download image %(image_href)s, reason: %(reason)s")
class KeystoneUnauthorized(IronicException):
_msg_fmt = _("Not authorized in Keystone.")
class KeystoneFailure(IronicException):
pass
class CatalogNotFound(IronicException):
_msg_fmt = _("Service type %(service_type)s with endpoint type "
"%(endpoint_type)s not found in keystone service catalog.")
class ServiceUnavailable(IronicException):
_msg_fmt = _("Connection failed")
class Forbidden(IronicException):
_msg_fmt = _("Requested OpenStack Images API is forbidden")
class BadRequest(IronicException):
pass
class InvalidEndpoint(IronicException):
_msg_fmt = _("The provided endpoint is invalid")
class CommunicationError(IronicException):
_msg_fmt = _("Unable to communicate with the server.")
class HTTPForbidden(NotAuthorized):
_msg_fmt = _("Access was denied to the following resource: %(resource)s")
class Unauthorized(IronicException):
pass
class HTTPNotFound(NotFound):
pass
class ConfigNotFound(IronicException):
_msg_fmt = _("Could not find config at %(path)s")
class NodeLocked(Conflict):
_msg_fmt = _("Node %(node)s is locked by host %(host)s, please retry "
"after the current operation is completed.")
class NodeNotLocked(Invalid):
_msg_fmt = _("Node %(node)s found not to be locked on release")
class NoFreeConductorWorker(TemporaryFailure):
_msg_fmt = _('Requested action cannot be performed due to lack of free '
'conductor workers.')
code = http_client.SERVICE_UNAVAILABLE
class VendorPassthruException(IronicException):
pass
class ConfigInvalid(IronicException):
_msg_fmt = _("Invalid configuration file. %(error_msg)s")
class DriverLoadError(IronicException):
_msg_fmt = _("Driver, hardware type or interface %(driver)s could not be "
"loaded. Reason: %(reason)s.")
class DriverOperationError(IronicException):
_msg_fmt = _("Runtime driver %(driver)s failure. Reason: %(reason)s.")
class ConsoleError(IronicException):
pass
class NoConsolePid(ConsoleError):
_msg_fmt = _("Could not find pid in pid file %(pid_path)s")
class ConsoleSubprocessFailed(ConsoleError):
_msg_fmt = _("Console subprocess failed to start. %(error)s")
class PasswordFileFailedToCreate(IronicException):
_msg_fmt = _("Failed to create the password file. %(error)s")
class IloOperationError(DriverOperationError):
_msg_fmt = _("%(operation)s failed, error: %(error)s")
class IloOperationNotSupported(DriverOperationError):
_msg_fmt = _("%(operation)s not supported. error: %(error)s")
class DracOperationError(DriverOperationError):
_msg_fmt = _('DRAC operation failed. Reason: %(error)s')
class FailedToGetSensorData(IronicException):
_msg_fmt = _("Failed to get sensor data for node %(node)s. "
"Error: %(error)s")
class FailedToParseSensorData(IronicException):
_msg_fmt = _("Failed to parse sensor data for node %(node)s. "
"Error: %(error)s")
class InsufficientDiskSpace(IronicException):
_msg_fmt = _("Disk volume where '%(path)s' is located doesn't have "
"enough disk space. Required %(required)d MiB, "
"only %(actual)d MiB available space present.")
class ImageCreationFailed(IronicException):
_msg_fmt = _('Creating %(image_type)s image failed: %(error)s')
class SwiftOperationError(IronicException):
_msg_fmt = _("Swift operation '%(operation)s' failed: %(error)s")
class SwiftObjectNotFoundError(SwiftOperationError):
_msg_fmt = _("Swift object %(obj)s from container %(container)s "
"not found. Operation '%(operation)s' failed.")
class SwiftTempUrlKeyNotFoundError(SwiftOperationError):
_msg_fmt = _("Swift Temp-Url-Key property not found in account."
" Operation '%(operation)s' failed.")
class SNMPFailure(DriverOperationError):
_msg_fmt = _("SNMP operation '%(operation)s' failed: %(error)s")
class FileSystemNotSupported(IronicException):
_msg_fmt = _("Failed to create a file system. "
"File system %(fs)s is not supported.")
class IRMCOperationError(DriverOperationError):
_msg_fmt = _('iRMC %(operation)s failed. Reason: %(error)s')
class IRMCSharedFileSystemNotMounted(DriverOperationError):
_msg_fmt = _("iRMC shared file system '%(share)s' is not mounted.")
class HardwareInspectionFailure(IronicException):
_msg_fmt = _("Failed to inspect hardware. Reason: %(error)s")
class NodeCleaningFailure(IronicException):
_msg_fmt = _("Failed to clean node %(node)s: %(reason)s")
class PathNotFound(IronicException):
_msg_fmt = _("Path %(dir)s does not exist.")
class DirectoryNotWritable(IronicException):
_msg_fmt = _("Directory %(dir)s is not writable.")
class UcsOperationError(DriverOperationError):
_msg_fmt = _("Cisco UCS client: operation %(operation)s failed for node"
" %(node)s. Reason: %(error)s")
class UcsConnectionError(IronicException):
_msg_fmt = _("Cisco UCS client: connection failed for node "
"%(node)s. Reason: %(error)s")
class ImageUploadFailed(IronicException):
_msg_fmt = _("Failed to upload %(image_name)s image to web server "
"%(web_server)s, reason: %(reason)s")
class CIMCException(DriverOperationError):
_msg_fmt = _("Cisco IMC exception occurred for node %(node)s: %(error)s")
class OneViewError(DriverOperationError):
_msg_fmt = _("OneView exception occurred. Error: %(error)s")
class OneViewInvalidNodeParameter(OneViewError):
_msg_fmt = _("Error while obtaining OneView info from node %(node_uuid)s. "
"Error: %(error)s")
class NodeTagNotFound(IronicException):
_msg_fmt = _("Node %(node_id)s doesn't have a tag '%(tag)s'")
class NetworkError(IronicException):
_msg_fmt = _("Network operation failure.")
class IncompleteLookup(Invalid):
_msg_fmt = _("At least one of 'addresses' and 'node_uuid' parameters "
"is required")
class NotificationSchemaObjectError(IronicException):
_msg_fmt = _("Expected object %(obj)s when populating notification payload"
" but got object %(source)s")
class NotificationSchemaKeyError(IronicException):
_msg_fmt = _("Object %(obj)s doesn't have the field \"%(field)s\" "
"required for populating notification schema key "
"\"%(key)s\"")
class NotificationPayloadError(IronicException):
_msg_fmt = _("Payload not populated when trying to send notification "
"\"%(class_name)s\"")
class StorageError(IronicException):
_msg_fmt = _("Storage operation failure.")
class RedfishError(DriverOperationError):
_msg_fmt = _("Redfish exception occurred. Error: %(error)s")
class RedfishConnectionError(RedfishError):
_msg_fmt = _("Redfish connection failed for node %(node)s: %(error)s")
class PortgroupPhysnetInconsistent(IronicException):
_msg_fmt = _("Port group %(portgroup)s has member ports with inconsistent "
"physical networks (%(physical_networks)s). All ports in a "
"port group must have the same physical network.")
class VifInvalidForAttach(Conflict):
_msg_fmt = _("Unable to attach VIF %(vif)s to node %(node)s. Reason: "
"%(reason)s")
class AgentAPIError(IronicException):
_msg_fmt = _('Agent API for node %(node)s returned HTTP status code '
'%(status)s with error: %(error)s')
class NodeTraitNotFound(NotFound):
_msg_fmt = _("Node %(node_id)s doesn't have a trait '%(trait)s'")
class InstanceRescueFailure(IronicException):
_msg_fmt = _('Failed to rescue instance %(instance)s for node '
'%(node)s: %(reason)s')
class InstanceUnrescueFailure(IronicException):
_msg_fmt = _('Failed to unrescue instance %(instance)s for node '
'%(node)s: %(reason)s')
class XClarityError(IronicException):
_msg_fmt = _("XClarity exception occurred. Error: %(error)s")
class BIOSSettingAlreadyExists(Conflict):
_msg_fmt = _('A BIOS setting %(name)s for node %(node)s already exists.')
class BIOSSettingNotFound(NotFound):
_msg_fmt = _("Node %(node)s doesn't have a BIOS setting '%(name)s'")
class BIOSSettingListNotFound(NotFound):
_msg_fmt = _("Node %(node)s doesn't have BIOS settings '%(names)s'")
class DatabaseVersionTooOld(IronicException):
_msg_fmt = _("Database version is too old")
class AgentConnectionFailed(IronicException):
_msg_fmt = _("Connection to agent failed: %(reason)s")
|
py
|
1a56eca1896aa5a9a76708e6778511cec4423c7d
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class CertificateRegistrationProviderOperations(object):
"""CertificateRegistrationProviderOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2015-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-08-01"
self.config = config
def list_operations(
self, custom_headers=None, raw=False, **operation_config):
"""Implements Csm operations Api to exposes the list of available Csm Apis
under the resource provider.
Implements Csm operations Api to exposes the list of available Csm Apis
under the resource provider.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of CsmOperationDescription
:rtype:
~azure.mgmt.web.models.CsmOperationDescriptionPaged[~azure.mgmt.web.models.CsmOperationDescription]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_operations.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.CsmOperationDescriptionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_operations.metadata = {'url': '/providers/Microsoft.CertificateRegistration/operations'}
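# Usage sketch (assumes an authenticated service client -- here a hypothetical
# `web_client` -- that exposes this operations group as
# `certificate_registration_provider`):
#
# for operation in web_client.certificate_registration_provider.list_operations():
#     print(operation.name)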
|
py
|
1a56ed1f8667a05f7bbb651ef352eed41d8c254f
|
'''
Copyright 2016 Tom Kenter
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
'''
import theano
import theano.tensor as T
import lasagne
from lasagne.regularization import regularize_layer_params, l2
import numpy as np
import sys
import os
import re
import _pickle as cPickle
#import cPickle
def makeOutputFileName(oArgs, iNrOfEmbeddings, iEmbeddingSize):
sShareWeights = "sharedWeights" if oArgs.bShareWeights else "noSharedWeights"
sReg = "reg" if oArgs.bRegularize else "noReg"
sLower = "noLc" if oArgs.bDontLowercase else "lc"
sPreInit = "preInit" if oArgs.sWord2VecFile else "noPreInit"
sGradientClippingBound = "noGradClip" \
if oArgs.fGradientClippingBound is None \
else ("gradClip_%f" % oArgs.fGradientClippingBound).replace(".", "_")
sOutputFile = "%s_%s_%s_lr_%s_%s_epochs_%d_batch_%d_neg_%d_voc_%dx%d_%s_%s_%s.pickle" % \
(oArgs.sLastLayer,
sShareWeights,
oArgs.sUpdate,
re.sub("_?0*$", '', ("%f" % oArgs.fLearningRate).replace(".", "_")),
sGradientClippingBound,
oArgs.iEpochs,
oArgs.iBatchSize,
oArgs.iNeg,
iNrOfEmbeddings - 1, # -1, because of the 0-embedding
iEmbeddingSize,
sReg,
sLower,
sPreInit)
return os.path.join(oArgs.OUTPUT_DIR, sOutputFile)
def storeWordEmbeddings(sOutputFile, npaWordEmbeddings, oVocab, oArgs):
if oArgs.bVerbose:
print("Storing word embeddings to %s" % sOutputFile)
fhOut = open(sOutputFile, mode='wb')
dSCBOW = {"oArgs": oArgs,
"npaWordEmbeddings": npaWordEmbeddings,
"oVocab": oVocab
}
cPickle.dump(dSCBOW, fhOut)
fhOut.close()
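# Read-back sketch: the pickle holds a dict with keys "oArgs", "npaWordEmbeddings"
# and "oVocab", so stored embeddings can be reloaded with, e.g.:
#
# with open(sOutputFile, mode='rb') as fhIn:
#     dSCBOW = cPickle.load(fhIn)
# npaWordEmbeddings = dSCBOW["npaWordEmbeddings"]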
class softMaxLayer_matrix(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
super(softMaxLayer_matrix, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
exps = T.exp((inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1))
return exps / exps.sum(axis=1).dimshuffle((0, 'x'))
class softMaxLayer(lasagne.layers.Layer):
def __init__(self, incoming, **kwargs):
super(softMaxLayer, self).__init__(incoming, **kwargs)
def get_output_shape_for(self, input_shape):
'''
The input is just a vector of numbers.
The output is also a vector, same size as the input.
'''
return input_shape
def get_output_for(self, input, **kwargs):
'''
Take the exp() of all inputs, and divide by the total.
'''
exps = T.exp(input)
return exps / exps.sum(axis=1).dimshuffle((0, 'x'))
class sigmoidLayer(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
super(sigmoidLayer, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
'''
We want a dot product of every row in inputs[0] (a vector) with every
row in inputs[1] (a matrix).
We do this 'by hand': we do an element-wise multiplication of every vector
in inputs[0] with every matrix in inputs[1], and sum the result.
'''
dots = (inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1)
# Take the sigmoid
return 1.0 / (1.0 + T.exp(dots))
class cosineLayer(lasagne.layers.MergeLayer):
'''
First layer gives a vector (or a batch of vectors, really)
Second layer gives a matrix (well, a batch of matrices)
We return a vector of numbers, just as many as there are cols in the second
layer matrix (NOTE that the second input layer is a transposed version of
the layer before it)
'''
def __init__(self, incomings, iEmbeddingSize, **kwargs):
super(cosineLayer, self).__init__(incomings, **kwargs)
self.iEmbeddingSize = iEmbeddingSize
def get_output_shape_for(self, input_shapes):
# input_shapes come like this:
# [(batch_size, vectors_size), (batch_size, rows, cols)]
return (input_shapes[0][0], input_shapes[1][1])
def get_output_for(self, inputs, **kwargs):
'''
We want a dot product of every row in inputs[0] (a vector) with every
row in inputs[1] (a matrix).
We do this 'by hand': we do an element-wise multiplication of every vector
in inputs[0] with every matrix in inputs[1], and sum the result.
'''
dots = (inputs[0].reshape((-1, self.iEmbeddingSize, 1)) * \
inputs[1]).sum(axis=1)
# Make sure the broadcasting is right
norms_1 = T.sqrt(T.square(inputs[0]).sum(axis=1)).dimshuffle(0, 'x')
# NOTE that the embeddings are transposed in the previous layer
norms_2 = T.sqrt(T.square(inputs[1]).sum(axis=1))
norms = norms_1 * norms_2
return dots / norms
class averageLayer(lasagne.layers.Layer):
def __init__(self, incoming, fGradientClippingBound=None, **kwargs):
super(averageLayer, self).__init__(incoming, **kwargs)
self.fGradientClippingBound = fGradientClippingBound
def get_output_shape_for(self, input_shape):
'''
The input is a batch of word vectors.
The output is a single vector, same size as the input word embeddings
In other words, since we are averaging, we lose the penultimate dimension
'''
return (input_shape[0], input_shape[2])
def get_output_for(self, input, **kwargs):
'''
The input is a batch of word vectors.
The output the sum of the word embeddings divided by the number of
non-null word embeddings in the input.
What we do with the normalizers is, we go from
[[[.01, .02, .03], # Word embedding sentence 1, word 1
[.02, .3, .01], # Word embedding sentence 1, word 2
[.0, .0, .0]],
[[.05, .06, .063], # Word embedding sentence 2, word 1
[.034,.45, .05],
[.01, .001, .03]],
...
]
first to (so that is the inner non-zero sum(axis=2) part):
[[3, 3, 0], # Number of non-zero components per vector in sentence 1
[3, 3, 3], # Number of non-zero components per vector in sentence 2
...
]
and finally to (so that is the outer non-zero sum(axis=1) part):
[2, 3, ...]
and we reshape that to:
[[2], # Number of words in sentence 1
[3], # Number of words in sentence 2
...]
'''
# Sums of word embeddings (so the zero embeddings don't matter here)
sums = input.sum(axis=1)
# Can we do this more cheaply (as in, more efficiently)?
# NOTE that we explicitly cast the output of the last sum() to floatX
# as otherwise Theano will cast the result of 'sums / normalizers' to
# float64
normalisers = T.neq((T.neq(input, 0.0)).sum(axis=2, dtype='int32'), 0.0).sum(axis=1, dtype='floatX').reshape((-1, 1))
averages = sums / normalisers
if self.fGradientClippingBound is not None:
averages = theano.gradient.grad_clip(averages,
- self.fGradientClippingBound,
self.fGradientClippingBound)
return averages
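# Pure-numpy sketch of the normaliser logic above (illustrative only): a word
# counts as present if any of its components is non-zero, and the summed word
# vectors are divided by that count, e.g.:
#
# words_present = (np.count_nonzero(batch, axis=2) != 0).sum(axis=1)  # e.g. [2, 3]
# averages = batch.sum(axis=1) / words_present[:, None].astype(np.float32)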
class averageLayer_matrix(lasagne.layers.Layer):
def __init__(self, incoming, iNrOfSentences=None,
fGradientClippingBound=None, **kwargs):
super(averageLayer_matrix, self).__init__(incoming, **kwargs)
self.iNrOfSentences = iNrOfSentences
self.fGradientClippingBound = fGradientClippingBound
def get_output_shape_for(self, input_shape):
'''
The input is a batch of matrices of word vectors.
The output is a batch of vectors, one for each matrix, the same size as
the input word embeddings
In other words, since we are averaging, we lose the penultimate dimension
'''
return (input_shape[0], input_shape[1], input_shape[3])
def get_output_for(self, input, **kwargs):
'''
The input is a batch of matrices of word vectors.
The output the sum of the word embeddings divided by the number of
non-zero word embeddings in the input.
The idea with the normalisers is similar as in the normal averageLayer
'''
# Sums of word embeddings (so the zero embeddings don't matter here)
sums = input.sum(axis=2)
# Can we do this more cheaply (as in, more efficiently)?
# NOTE that we explicitly cast the output of the last sum() to floatX
# as otherwise Theano will cast the result of 'sums / normalizers' to
# float64
normalisers = T.neq((T.neq(input, 0.0)).sum(axis=3, dtype='int32'), 0.0).sum(axis=2, dtype='floatX').reshape((-1, self.iNrOfSentences, 1))
averages = sums / normalisers
if self.fGradientClippingBound is not None:
averages = theano.gradient.grad_clip(averages,
- self.fGradientClippingBound,
self.fGradientClippingBound)
return averages
class gateLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(gateLayer, self).__init__(incomings, **kwargs)
def get_output_shape_for(self, input_shapes):
return input_shapes[1]
def get_output_for(self, inputs, **kwargs):
'''
First layer is a batch of embedding indices:
[[11,21,43,0,0],
[234,543,0,0,0,],
...
]
Second layer are the embeddings:
[ [[.02, .01...],
[.004, .005, ...],
...,
.0 .0 .0 ... ,
.0 .0 .0 ...],
[[...],
....
]
]
'''
return \
T.where(T.eq(inputs[0],0), np.float32(0.0), np.float32(1.0)).dimshuffle((0,1,'x')) * inputs[1]
class gateLayer_matrix(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(gateLayer_matrix, self).__init__(incomings, **kwargs)
def get_output_shape_for(self, input_shapes):
return input_shapes[1]
def get_output_for(self, inputs, **kwargs):
'''
First layer is a batch of matrices of embedding indices:
Second layer are the corresponding embeddings:
'''
return \
T.where(T.eq(inputs[0],0), np.float32(0.0), np.float32(1.0)).dimshuffle((0,1,2,'x')) * inputs[1]
class flipLayer(lasagne.layers.Layer):
'''
Flip the word embeddings of the negative examples.
So the word embeddings <we> of the negative examples will become <-we>
'''
def __init__(self, incoming, iPos=None, iNrOfSentences=None, **kwargs):
super(flipLayer, self).__init__(incoming, **kwargs)
# Set all the values to -1
npaFlipper = np.ones(iNrOfSentences, dtype=np.int8) * -1
# Except for the first one/two (the positive examples)
npaFlipper[0:iPos] = 1
# Set the broadcasting right
self.flipper = theano.shared(npaFlipper).dimshuffle('x', 0, 'x', 'x')
def get_output_shape_for(self, input_shape):
return input_shape
def get_output_for(self, input, **kwargs):
return input * self.flipper
def preInit(tWeightShape, oW2v, oVocab):
assert tWeightShape == (oW2v.syn0.shape[0] + 1, oW2v.syn0.shape[1])
# Copy the embeddings
W = np.empty(tWeightShape, dtype=np.float32)
# NOTE that we start at 1 here (rather than 0)
W[1:tWeightShape[0],:] = oW2v.syn0
# Make a corresponding vocabulary
# We start at index 1 (0 is a dummy 0.0 embedding)
for i in range(oW2v.syn0.shape[0]):
sWord = oW2v.index2word[i]
iVocabIndex = i+1
oVocab.dVocab[sWord] = iVocabIndex
oVocab.dIndex2word[iVocabIndex] = sWord
return W
def nextBatch(oSentenceIterator, funcRandomIterator, oVocab=None,
npaBatch_1=None, npaBatch_2=None, iMaxNrOfTokens=None,
iBatchSize=None, iPos=None, iNeg=None):
'''
This function gives back a batch to train/test on.
It needs:
- a sentence iterator object that yields a triple of sentences:
(sentence n, sentence n-1, sentence n+1)
which are next to one another in the corpus.
These are considered positive examples.
- a sentence iterator that yields random sentences (so single sentences) from
the corpus. These are used as negative examples.
- the vocabulary object is usually empty
npaBatch_1 and npaBatch_2 should be pre-allocated arrays of size:
npaBatch_1: (iBatchSize, iMaxNrOfTokens)
npaBatch_2: (iBatchSize, iNeg + iPos, iMaxNrOfTokens)
'''
npaBatch_1[:] = 0.0 # Set the pre-allocated arrays to 0 again
npaBatch_2[:] = 0.0
iSentencePairsSampled = 0
# NOTE that because of how we do things, the last batch isn't included if
# it's smaller than the batch size
for tSentenceTuple in oSentenceIterator:
# NOTE in the toronto case, the sentence iterator already yields tokens
isTorontoFormat = oSentenceIterator.sName == "torontoSentenceIterator" or \
oSentenceIterator.sName == "week"
aWeIndices1 = \
[oVocab[sToken] for sToken in tSentenceTuple[0] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[0].split(' ') \
if oVocab[sToken] is not None]
aWeIndices2 = \
[oVocab[sToken] for sToken in tSentenceTuple[1] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[1].split(' ') \
if oVocab[sToken] is not None]
aWeIndices3 = None
if iPos == 2:
aWeIndices3 = \
[oVocab[sToken] for sToken in tSentenceTuple[2] \
if oVocab[sToken] is not None] \
if isTorontoFormat else \
[oVocab[sToken] for sToken in tSentenceTuple[2].split(' ') \
if oVocab[sToken] is not None]
# We only deal with triples whose members all contain at least one known
# word
if (len(aWeIndices1) == 0) or (len(aWeIndices2) == 0) or \
((iPos == 2) and (len(aWeIndices3) == 0)):
continue
npaBatch_1[iSentencePairsSampled][0:min(len(aWeIndices1),iMaxNrOfTokens)]=\
aWeIndices1[:iMaxNrOfTokens]
npaBatch_2[iSentencePairsSampled][0][0:min(len(aWeIndices2),iMaxNrOfTokens)] = aWeIndices2[:iMaxNrOfTokens]
if iPos == 2:
npaBatch_2[iSentencePairsSampled][1][0:min(len(aWeIndices3),iMaxNrOfTokens)] = aWeIndices3[:iMaxNrOfTokens]
iRandomSamples = 0
while 1: # We break from inside the loop
if iRandomSamples == iNeg: # So if iNeg == 0, we break right away
break
aWeIndicesRandom = []
while len(aWeIndicesRandom) == 0: # Get a non-empty random sentence
# NOTE that randomSentence is a list of tokens in the Toronto case
randomSentence = next(funcRandomIterator)
aWeIndicesRandom = \
[oVocab[sToken] for sToken in randomSentence \
if oVocab[sToken] is not None] \
if isTorontoFormat \
else [oVocab[sToken] for sToken in randomSentence.split(' ') \
if oVocab[sToken] is not None]
iRandomSamples += 1
npaBatch_2[iSentencePairsSampled][(iPos-1)+iRandomSamples][0:min(len(aWeIndicesRandom),iMaxNrOfTokens)] = aWeIndicesRandom[:iMaxNrOfTokens]
iSentencePairsSampled += 1
if iSentencePairsSampled == iBatchSize:
# Just yield something (npaBatch_1, npaBatch_2 are filled already)
yield 1
# Reset
iSentencePairsSampled = 0
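# Pre-allocation sketch matching the shapes documented in nextBatch (the arrays are
# reused across batches; uint32 is assumed since they hold embedding indices):
#
# npaBatch_1 = np.zeros((iBatchSize, iMaxNrOfTokens), dtype=np.uint32)
# npaBatch_2 = np.zeros((iBatchSize, iPos + iNeg, iMaxNrOfTokens), dtype=np.uint32)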
def build_scbow(oArgs, iPos=None, oW2v=None, oVocab=None, tWeightShape=None):
# Input variable for a batch of sentences (so: sentence n)
input_var_1 = T.matrix('input_var_1', dtype='uint32')
# Input variable for a batch of positive and negative examples
# (so sentence n-1, sentence n+1, neg1, neg2, ...)
input_var_2 = T.tensor3('input_var_2', dtype='uint32')
W_init_1, W_init_2 = None, None
# First embedding input layer
llIn_1 = lasagne.layers.InputLayer(shape=(None, oArgs.iMaxNrOfTokens),
input_var=input_var_1,
name='llIn_1')
# Second embedding input layer
llIn_2 = lasagne.layers.InputLayer(shape=(None, iPos + oArgs.iNeg,
oArgs.iMaxNrOfTokens),
input_var=input_var_2,
name='llIn_2')
W_init_1 = None
if oW2v is None:
W_init_1 = lasagne.init.Normal().sample(tWeightShape)
else: ## Here is the pre-initialization
W_init_1 = preInit(tWeightShape, oW2v, oVocab)
W_init_1[0,:] = 0.0
# First embedding layer
llEmbeddings_1 = lasagne.layers.EmbeddingLayer(
llIn_1,
input_size=tWeightShape[0],
output_size=tWeightShape[1],
W=W_init_1,
name='llEmbeddings_1')
llGate_1 = gateLayer([llIn_1, llEmbeddings_1], name='llGate_1')
llAverage_1 = averageLayer(llGate_1,
fGradientClippingBound=oArgs.fGradientClippingBound,
name='llAverage_1')
W_init_2 = None
if not oArgs.bShareWeights:
if oW2v is None:
W_init_2 = lasagne.init.Normal().sample(tWeightShape)
else: # We are not sharing, but we are pre-initializing
W_init_2 = preInit(tWeightShape, oW2v, oVocab)
W_init_2[0,:] = 0.0
# Second embedding layer, the weights tied with the first embedding layer
llEmbeddings_2 = lasagne.layers.EmbeddingLayer(
llIn_2,
input_size=tWeightShape[0],
output_size=tWeightShape[1],
W=llEmbeddings_1.W if oArgs.bShareWeights else W_init_2,
name='llEmbeddings_2')
llGate_2 = gateLayer_matrix([llIn_2, llEmbeddings_2], name='llGate_2')
llAverage_2 = None
if oArgs.sLastLayer == 'cosine':
llAverage_2 = \
averageLayer_matrix(llGate_2, iNrOfSentences=iPos + oArgs.iNeg,
fGradientClippingBound=\
oArgs.fGradientClippingBound,
name="llAverage_2")
else:
llFlip_2 = flipLayer(llGate_2, iPos=iPos, iNrOfSentences=iPos + oArgs.iNeg,
name='llFlip_2')
llAverage_2 = \
averageLayer_matrix(llFlip_2, iNrOfSentences=iPos + oArgs.iNeg,
fGradientClippingBound=\
oArgs.fGradientClippingBound,
name="llAverage_2")
llTranspose_2 = lasagne.layers.DimshuffleLayer(llAverage_2, (0,2,1),
name='llTranspose_2')
llFinalLayer = None
if oArgs.sLastLayer == 'cosine':
llCosine = cosineLayer([llAverage_1, llTranspose_2], tWeightShape[1],
name='llCosine')
llFinalLayer = softMaxLayer(llCosine, name='llSoftMax')
else:
llFinalLayer = sigmoidLayer([llAverage_1, llTranspose_2], tWeightShape[1],
name='llSigmoid')
### That was all the network stuff
### Now let's build the functions
# Target var if needed
target_var = T.fmatrix('targets') if oArgs.sLastLayer == "cosine" else None
if oArgs.bVerbose:
print("Building prediction functions")
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(llFinalLayer)
if oArgs.bVerbose:
print("Building loss functions")
# For checking/debugging
forward_pass_fn = theano.function([input_var_1, input_var_2], prediction)
loss = None
if oArgs.sLastLayer == 'cosine':
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
else: # sigmoid
loss = - T.log(prediction).sum(axis=1)
if oArgs.bRegularize:
l2_penalty = regularize_layer_params(llFinalLayer, l2)
loss = loss + l2_penalty
loss = loss.mean()
if oArgs.bVerbose:
print("Building update functions")
params = lasagne.layers.get_all_params(llFinalLayer, trainable=True)
fStartLearningRate = np.float32(oArgs.fLearningRate)
thsLearningRate = None
updates = None
if oArgs.sUpdate == 'nesterov':
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=oArgs.fLearningRate,
momentum=oArgs.fMomentum)
elif oArgs.sUpdate == 'adamax':
updates = lasagne.updates.adamax(loss, params,
learning_rate=oArgs.fLearningRate)
elif oArgs.sUpdate == 'adadelta':
updates = lasagne.updates.adadelta(loss, params,
learning_rate=oArgs.fLearningRate)
elif oArgs.sUpdate == "sgd":
# This doesn't work with INEX for now...
thsLearningRate = theano.shared(fStartLearningRate)
updates = lasagne.updates.sgd(loss, params, learning_rate=thsLearningRate)
if oArgs.bVerbose:
print("Building training function")
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = None
if oArgs.sLastLayer == "cosine":
train_fn = theano.function([input_var_1, input_var_2, target_var],
loss,
updates=updates)
else:
train_fn = theano.function([input_var_1, input_var_2],
loss,
updates=updates)
return llFinalLayer, forward_pass_fn, thsLearningRate, train_fn
def updateLearningRate(thsLearningRate, iNrOfBatchesSoFar, fTotalNrOfBatches,
oArgs):
fNewLearningRate = \
max(oArgs.fLearningRate * 0.0001,
oArgs.fLearningRate * (1.0 - (iNrOfBatchesSoFar / fTotalNrOfBatches))
)
thsLearningRate.set_value(fNewLearningRate)
if oArgs.bVeryVerbose:
print("Batch %d of %.0f" % (iNrOfBatchesSoFar, fTotalNrOfBatches))
print("Learning rate: %f" % thsLearningRate.get_value())
def parseArguments():
import argparse
oArgsParser = argparse.ArgumentParser(description='Siamese CBOW')
oArgsParser.add_argument('DATA',
help="File (in PPDB case) or directory (in Toronto Book Corpus and INEX case) to read the data from. NOTE that the program runs in aparticular input mode (INEX/PPDB/TORONTO) which is deduced from the directory/file name)")
oArgsParser.add_argument('OUTPUT_DIR',
help="A file to store the final and possibly intermediate word embeddings to (in cPickle format)")
oArgsParser.add_argument('-batch_size', metavar="INT", dest="iBatchSize",
help="Batch size. Default: 1",
type=int, action="store", default=1)
oArgsParser.add_argument('-dont_lowercase', dest='bDontLowercase',
help="By default, all input text is lowercased. Use this option to prevent this.",
action='store_true')
oArgsParser.add_argument('-dry_run', dest="bDryRun",
help="Build the network, print some statistics (if -v is on) and quit before training starts.",
action="store_true")
oArgsParser.add_argument('-embedding_size', metavar="INT",
dest="iEmbeddingSize",
help="Dimensionality of the word embeddings. Default: 300",
type=int, action="store", default=300)
oArgsParser.add_argument('-epochs', metavar="INT", dest="iEpochs",
help="Maximum number of epochs for training. Default: 10",
type=int, action="store", default=10)
oArgsParser.add_argument('-gradient_clipping_bound', metavar="FLOAT",
dest="fGradientClippingBound",
help="Gradient clipping bound (so gradients will be clipped to [-FLOAT, +FLOAT]).",
type=float, action="store")
oArgsParser.add_argument('-last_layer', metavar="LAYER",
dest="sLastLayer",
help="Last layer is 'cosine' or 'sigmoid'. NOTE that this choice also determines the loss function (binary cross entropy or negative sampling loss, respectively). Default: cosine",
action="store", default='cosine',
choices=['cosine', 'sigmoid'])
oArgsParser.add_argument('-learning_rate', metavar="FLOAT",
dest="fLearningRate",
help="Learning rate. Default: 1.0",
type=float, action="store", default=1.0)
oArgsParser.add_argument('-max_nr_of_tokens', metavar="INT",
dest="iMaxNrOfTokens",
help="Maximum number of tokens considered per sentence. Default: 50",
type=int, action="store", default=50)
oArgsParser.add_argument('-max_nr_of_vocab_words', metavar="INT",
dest="iMaxNrOfVocabWords",
help="Maximum number of words considered. If this is not specified, all words are considered",
type=int, action="store")
oArgsParser.add_argument('-momentum', metavar="FLOAT",
dest="fMomentum",
help="Momentum, only applies when 'nesterov' is used as update method (see -update). Default: 0.0",
type=float, action="store", default=0.0)
oArgsParser.add_argument('-neg', metavar="INT", dest="iNeg",
help="Number of negative examples. Default: 1",
type=int, action="store", default=1)
oArgsParser.add_argument('-regularize', dest="bRegularize",
help="Use l2 normalization on the parameters of the network",
action="store_true")
oArgsParser.add_argument('-share_weights', dest="bShareWeights",
help="Turn this option on (a good idea in general) for the embedding weights of the input sentences and the other sentences to be shared.",
action="store_true")
oArgsParser.add_argument('-start_storing_at', metavar="INT",
dest="iStartStoringAt",
help="Start storing embeddings at epoch number INT. Default: 0. I.e. start storing right away (if -store_at_epoch is on, that is)",
action="store", type=int, default=0)
oArgsParser.add_argument('-store_at_batch', metavar="INT",
dest="iStoreAtBatch",
help="Store embeddings every INT batches.",
action="store", type=int, default=None)
oArgsParser.add_argument('-store_at_epoch', dest="iStoreAtEpoch",
metavar="INT",
help="Store embeddings every INT epochs (so 1 for storing at the end of every epoch, 10 for for storing every 10 epochs, etc.).",
action="store", type=int)
oArgsParser.add_argument('-update', metavar="UPDATE_ALGORITHM",
dest="sUpdate",
help="Update algorithm. Options are 'adadelta', 'adamax', 'nesterov' (which uses momentum) and 'sgd'. Default: 'adadelta'",
action="store", default='adadelta',
choices=['adadelta', 'adamax', 'sgd',
'nesterov'])
oArgsParser.add_argument("-v", dest="bVerbose", action="store_true",
help="Be verbose")
oArgsParser.add_argument('-vocab', dest="sVocabFile", metavar="FILE",
help="A vocabulary file is simply a file, SORTED BY FREQUENCY of frequence<SPACE>word lines. You can take the top n of these (which is why it should be sorted by frequency). See -max_nr_of_vocab_words.",
action="store")
oArgsParser.add_argument("-vv", dest="bVeryVerbose", action="store_true",
help="Be very verbose")
oArgsParser.add_argument('-w2v', dest="sWord2VecFile", metavar="FILE",
help="A word2vec model can be used to initialize the weights for words in the vocabulary file from (missing words just get a random embedding). If the weights are not initialized this way, they will be trained from scratch.",
action="store")
  oArgsParser.add_argument('-wk', dest="week", action="store")
oArgs = oArgsParser.parse_args()
if (oArgs.sVocabFile is None) and (oArgs.sWord2VecFile is None):
print >>sys.stderr, "[ERROR] Please specify either a word2vec file or a vocab file"
exit(1)
if oArgs.bVeryVerbose: # If we are very verbose, we are also just verbose
oArgs.bVerbose=True
return oArgs
if __name__ == "__main__":
oArgs = parseArguments()
iPos=2
# Prepare Theano variables for inputs and targets
# Input variable for a batch of left sentences
input_var_1 = T.matrix('input_var_1', dtype='int32')
# Input variable for a batch of right sentences, plus negative examples
input_var_2 = T.tensor3('input_var_2', dtype='int32')
target_var = T.fmatrix('targets') if oArgs.sLastLayer == "cosine" else None
npaWordEmbeddings = np.array([[.1, .2, .3, .4],
[.2, .3, .4, 5],
[-.7, -.4, -.5, -.6],
[-.8, -.9, -.45, -.56],
[.2131, .213, .434, .652]]
).astype(np.float32)
dModel = None
if oArgs.sStoredModel is not None:
import cPickle
fhFile = open(oArgs.sStoredModel, mode='rb')
dModel = cPickle.load(fhFile)
fhFile.close()
npaWordEmbeddings = dModel['npaWordEmbeddings']
npaTargets = np.zeros((oArgs.iBatchSize, oArgs.iNeg + iPos),
dtype=np.float32)
if iPos == 2:
npaTargets[:,[0,1]] = .5
else:
npaTargets[:,0] = 1.0
iNrOfEmbeddings, iEmbeddingSize = npaWordEmbeddings.shape
npaInput_1 = np.array([[0,1], [0,1], [1,0]]).astype('int32')
npaInput_2 = np.array([[2,1], [3,2], [1,0]]).astype('int32')
npaInput_3 = np.array([[2,3], [1,2], [1,4]]).astype('int32')
iMaxNrOfTokens = 2
network = build_scbow(input_var_1, input_var_2,
iBatchSize=oArgs.iBatchSize,
iPos=iPos,
iNeg=oArgs.iNeg, iMaxNrOfTokens=iMaxNrOfTokens,
tWeightShape=npaWordEmbeddings.shape,
npaWordEmbeddings=npaWordEmbeddings,
sLastLayer=oArgs.sLastLayer,
bVerbose=oArgs.bVerbose)
prediction = lasagne.layers.get_output(network)
forward_pass_fn = theano.function([input_var_1, input_var_2],
prediction)
# We want to maximize the sum of the log probabilities, so we want to
# minimize this loss objective
# NOTE that we expect the word embeddings of the negative examples to be
# reversed (as in: -1 * word embedding)
npaLossBoost = np.ones(oArgs.iNeg + iPos, dtype=np.float32)
#npaLossBoost[0:iPos] = oArgs.fLossBoost
loss = None
if oArgs.sLastLayer == 'cosine':
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
else: # sigmoid
loss = - (T.log(prediction) * npaLossBoost).sum(axis=1)
loss_fn = theano.function([prediction, target_var], loss)
# Pre-allocate memory
npaBatch_1 = np.zeros((oArgs.iBatchSize, iMaxNrOfTokens),
dtype=np.int8)
npaBatch_2 = np.zeros((oArgs.iBatchSize, oArgs.iNeg + iPos, iMaxNrOfTokens),
dtype=np.int8)
# Check that the network is producing anything
for i in moetNog(npaInput_1, npaInput_2, npaInput_3,
npaBatch_1, npaBatch_2,
iNeg=oArgs.iNeg, iBatchSize=oArgs.iBatchSize,
bShuffle=False):
# Check the batch itself
print("Batch (1):\n%s\n (2)\n%s" % (npaBatch_1, npaBatch_2))
npaPredictions = forward_pass_fn(npaBatch_1, npaBatch_2)
print("Predictions (%s):\n%s" % (npaPredictions[0].dtype, npaPredictions))
L = loss_fn(npaPredictions, npaTargets)
print("Loss: %s" % L)
|
py
|
1a56ed85b95d2480b32f7aeadbbf759ac78cedd3
|
import enum
import logging
from typing import Any, Optional
from plugins.costume_loader_pkg.backend.taxonomy import Taxonomy, TaxonomyType
"""
This class is an enum for all attributes.
An attribute is a special property that is used
to describe one characteristic of an entity.
For example, a costume has the attribute
"Dominante Farbe". The attribute should not be
confused with TaxonomyType, which is the enum
for all possible taxonomies in the database.
"""
class Attribute(enum.Enum):
ortsbegebenheit = "ortsbegebenheit"
dominanteFarbe = "dominanteFarbe"
stereotypRelevant = "stereotypRelevant"
dominanteFunktion = "dominanteFunktion"
dominanterZustand = "dominanterZustand"
dominanteCharaktereigenschaft = "dominanteCharaktereigenschaft"
stereotyp = "stereotyp"
geschlecht = "geschlecht"
dominanterAlterseindruck = "dominanterAlterseindruck"
genre = "genre"
rollenberuf = "rollenberuf"
dominantesAlter = "dominantesAlter"
rollenrelevanz = "rollenrelevanz"
spielzeit = "spielzeit"
tageszeit = "tageszeit"
koerpermodifikation = "koerpermodifikation"
kostuemZeit = "kostuemZeit"
familienstand = "familienstand"
charaktereigenschaft = "charaktereigenschaft"
spielort = "spielort"
spielortDetail = "spielortDetail"
alterseindruck = "alterseindruck"
alter = "alter"
basiselement = "basiselement"
design = "design"
form = "form"
trageweise = "trageweise"
zustand = "zustand"
funktion = "funktion"
material = "material"
materialeindruck = "materialeindruck"
farbe = "farbe"
farbeindruck = "farbeindruck"
farbkonzept = "farbkonzept"
# German names for attributes
"""
@staticmethod
def get_name(attribute) -> str:
if attribute == Attribute.ortsbegebenheit:
return "Ortsbegebenheit"
elif attribute == Attribute.dominanteFarbe:
return "Dominante Farbe"
elif attribute == Attribute.stereotypRelevant:
return "Stereotyp relevant"
elif attribute == Attribute.dominanteFunktion:
return "Dominante Funktion"
elif attribute == Attribute.dominanterZustand:
return "Dominanter Zustand"
elif attribute == Attribute.dominanteCharaktereigenschaft:
return "Dominante Charaktereigenschaft"
elif attribute == Attribute.stereotyp:
return "Stereotyp"
elif attribute == Attribute.geschlecht:
return "Geschlecht"
elif attribute == Attribute.dominanterAlterseindruck:
return "Dominanter Alterseindruck"
elif attribute == Attribute.genre:
return "Genre"
elif attribute == Attribute.rollenberuf:
return "Rollenberuf"
elif attribute == Attribute.dominantesAlter:
return "Dominantes Alter"
elif attribute == Attribute.rollenrelevanz:
return "Rollenrelevanz"
elif attribute == Attribute.spielzeit:
return "Spielzeit"
elif attribute == Attribute.tageszeit:
return "Tageszeit"
elif attribute == Attribute.koerpermodifikation:
return "Körpermodifikation"
elif attribute == Attribute.kostuemZeit:
return "Kostümzeit"
elif attribute == Attribute.familienstand:
return "Familienstand"
elif attribute == Attribute.charaktereigenschaft:
return "Charaktereigenschaft"
elif attribute == Attribute.spielort:
return "Spielort"
elif attribute == Attribute.spielortDetail:
return "SpielortDetail"
elif attribute == Attribute.alterseindruck:
return "Alterseindruck"
elif attribute == Attribute.alter:
return "Alter"
elif attribute == Attribute.basiselement:
return "Basiselement"
elif attribute == Attribute.design:
return "Design"
elif attribute == Attribute.form:
return "Form"
elif attribute == Attribute.trageweise:
return "Trageweise"
elif attribute == Attribute.zustand:
return "Zustand"
elif attribute == Attribute.funktion:
return "Funktion"
elif attribute == Attribute.material:
return "Material"
elif attribute == Attribute.materialeindruck:
return "Materialeindruck"
elif attribute == Attribute.farbe:
return "Farbe"
elif attribute == Attribute.farbeindruck:
return "Farbeindruck"
elif attribute == Attribute.farbkonzept:
return "Farbkonzept"
else:
Logger.error("No name for attribute \"" + str(attribute) + "\" specified")
raise ValueError("No name for attribute \"" + str(attribute) + "\" specified")
return
"""
    # English names for attributes
@staticmethod
def get_name(attribute) -> str:
"""
Returns the human representable name for the
given Attribute.
"""
if attribute == Attribute.ortsbegebenheit:
return "Location"
elif attribute == Attribute.dominanteFarbe:
return "Dominant Color"
elif attribute == Attribute.stereotypRelevant:
return "Stereotyp Relevant"
elif attribute == Attribute.dominanteFunktion:
return "Dominant Function"
elif attribute == Attribute.dominanterZustand:
return "Dominant Condition"
elif attribute == Attribute.dominanteCharaktereigenschaft:
return "Dominant Character Trait"
elif attribute == Attribute.stereotyp:
return "Stereotype"
elif attribute == Attribute.geschlecht:
return "Gender"
elif attribute == Attribute.dominanterAlterseindruck:
return "Dominant Age Impression"
elif attribute == Attribute.genre:
return "Genre"
elif attribute == Attribute.rollenberuf:
return "Profession"
elif attribute == Attribute.dominantesAlter:
return "Dominant Age"
elif attribute == Attribute.rollenrelevanz:
return "Role Relevance"
elif attribute == Attribute.spielzeit:
return "Time of Setting"
elif attribute == Attribute.tageszeit:
return "Time of Day"
elif attribute == Attribute.koerpermodifikation:
return "Body Modification"
elif attribute == Attribute.kostuemZeit:
return "Costume Time"
elif attribute == Attribute.familienstand:
return "Marital Status"
elif attribute == Attribute.charaktereigenschaft:
return "Character Trait"
elif attribute == Attribute.spielort:
return "Venue"
elif attribute == Attribute.spielortDetail:
return "Venue Detail"
elif attribute == Attribute.alterseindruck:
return "Age Impression"
elif attribute == Attribute.alter:
return "Age"
elif attribute == Attribute.basiselement:
return "Base Element"
elif attribute == Attribute.design:
return "Design"
elif attribute == Attribute.form:
return "Form"
elif attribute == Attribute.trageweise:
return "Way of Wearing"
elif attribute == Attribute.zustand:
return "Condition"
elif attribute == Attribute.funktion:
return "Function"
elif attribute == Attribute.material:
return "Material"
elif attribute == Attribute.materialeindruck:
return "Material Impression"
elif attribute == Attribute.farbe:
return "Color"
elif attribute == Attribute.farbeindruck:
return "Color Impression"
elif attribute == Attribute.farbkonzept:
return "Color Concept"
else:
logging.error('No name for attribute "' + str(attribute) + '" specified')
raise ValueError('No name for attribute "' + str(attribute) + '" specified')
@staticmethod
def get_taxonomy_type(attribute) -> Optional[TaxonomyType]:
"""
Returns the corresponding taxonomy type
this attribute is used for.
Note, that an attribute has only one taxonomy type,
while a taxonomy type can be used by multiple attributes.
"""
if attribute == Attribute.ortsbegebenheit:
return TaxonomyType.ortsbegebenheit
elif attribute == Attribute.dominanteFarbe:
return TaxonomyType.farbe
elif attribute == Attribute.stereotypRelevant:
return TaxonomyType.stereotypRelevant
elif attribute == Attribute.dominanteFunktion:
return TaxonomyType.funktion
elif attribute == Attribute.dominanterZustand:
return TaxonomyType.zustand
elif attribute == Attribute.dominanteCharaktereigenschaft:
return TaxonomyType.typus
elif attribute == Attribute.stereotyp:
return TaxonomyType.stereotyp
elif attribute == Attribute.geschlecht:
return TaxonomyType.geschlecht
elif attribute == Attribute.dominanterAlterseindruck:
return TaxonomyType.alterseindruck
elif attribute == Attribute.genre:
return TaxonomyType.genre
elif attribute == Attribute.rollenberuf:
return TaxonomyType.rollenberuf
elif attribute == Attribute.dominantesAlter:
return None
elif attribute == Attribute.rollenrelevanz:
return TaxonomyType.rollenrelevanz
elif attribute == Attribute.spielzeit:
return TaxonomyType.spielzeit
elif attribute == Attribute.tageszeit:
return TaxonomyType.tageszeit
elif attribute == Attribute.koerpermodifikation:
return TaxonomyType.koerpermodifikation
elif attribute == Attribute.kostuemZeit:
return None
elif attribute == Attribute.familienstand:
return TaxonomyType.familienstand
elif attribute == Attribute.charaktereigenschaft:
return TaxonomyType.charaktereigenschaft
elif attribute == Attribute.spielort:
return TaxonomyType.spielort
elif attribute == Attribute.spielortDetail:
return TaxonomyType.spielortDetail
elif attribute == Attribute.alterseindruck:
return TaxonomyType.alterseindruck
elif attribute == Attribute.alter:
return None
elif attribute == Attribute.basiselement:
return TaxonomyType.basiselement
elif attribute == Attribute.design:
return TaxonomyType.design
elif attribute == Attribute.form:
return TaxonomyType.form
elif attribute == Attribute.trageweise:
return TaxonomyType.trageweise
elif attribute == Attribute.zustand:
return TaxonomyType.zustand
elif attribute == Attribute.funktion:
return TaxonomyType.funktion
elif attribute == Attribute.material:
return TaxonomyType.material
elif attribute == Attribute.materialeindruck:
return TaxonomyType.materialeindruck
elif attribute == Attribute.farbe:
return TaxonomyType.farbe
elif attribute == Attribute.farbeindruck:
return TaxonomyType.farbeindruck
elif attribute == Attribute.farbkonzept:
return TaxonomyType.farbkonzept
else:
logging.error(
'No taxonomy type for attribute "' + str(attribute) + '" specified'
)
raise ValueError(
'No taxonomy type for attribute "' + str(attribute) + '" specified'
)
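# Minimal usage sketch, derived from the mappings above:
#   Attribute.get_name(Attribute.dominanteFarbe)          -> "Dominant Color"
#   Attribute.get_taxonomy_type(Attribute.dominanteFarbe) -> TaxonomyType.farbe
#   Attribute.get_taxonomy_type(Attribute.kostuemZeit)    -> None (no taxonomy)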
|
py
|
1a56ed91553c9ebab54fb3e9b6d408f5859414b0
|
# -*- coding: utf-8 -*-
"""
<application name>
Copyright ©<year> <author>
Licensed under the terms of the <LICENSE>.
See LICENSE for details.
@author: <author>
"""
# Setup PyQt's v2 APIs
import sip
API_NAMES = ["QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl",
"QVariant"]
API_VERSION = 2
for name in API_NAMES:
sip.setapi(name, API_VERSION)
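# Effect of the v2 API (general PyQt4/sip behaviour, not specific to this
# template): wrapped Qt types such as QString and QVariant are exposed as
# native Python objects, e.g. methods that would return a QString return str.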
|
py
|
1a56edf50280c5fa6889ade178b890bf10c52af1
|
import numpy as np
import os, sys, gzip
import urllib, zipfile
from MTL_run import run_mtl
from sklearn.metrics import f1_score
def do_10_fold():
shared_nnet_spec= [ 1200 ]
individual_nnet_spec0= [ 1200, 1200 ]
individual_nnet_spec1= [ 1200, 1200 ]
individual_nnet_spec2= [ 1200, 1200 ]
individual_nnet_spec = [ individual_nnet_spec0, individual_nnet_spec1, individual_nnet_spec2 ]
learning_rate = 0.01
batch_size = 10
n_epochs = 10
dropout = 0.0
truth0 = []
pred0 = []
truth1 = []
pred1 = []
truth2 = []
pred2 = []
## Read files
file_path = os.path.dirname(os.path.realpath(__file__))
print file_path
lib_path = os.path.abspath(os.path.join(file_path, '..', '..', 'common'))
sys.path.append(lib_path)
from data_utils import get_file
origin = 'http://ftp.mcs.anl.gov/pub/candle/public/benchmarks/P3B1/P3B1_data.tgz'
data_loc = get_file('P3B1_data.tgz', origin, untar=True, md5_hash=None, cache_subdir='P3B1')
print 'Data downloaded and stored at: ' + data_loc
data_path = os.path.dirname(data_loc)
print data_path
    for fold in range( 1 ):  # NOTE: only the first of the 10 folds is run here
feature_train_0 = np.genfromtxt( data_path + '/task0_' + str( fold ) + '_train_feature.csv', delimiter= ',' )
truth_train_0 = np.genfromtxt( data_path + '/task0_' + str( fold ) + '_train_label.csv', delimiter= ',' )
feature_test_0 = np.genfromtxt( data_path + '/task0_' + str( fold ) + '_test_feature.csv', delimiter= ',' )
truth_test_0 = np.genfromtxt( data_path + '/task0_' + str( fold ) + '_test_label.csv', delimiter= ',' )
feature_train_1 = np.genfromtxt( data_path + '/task1_' + str( fold ) + '_train_feature.csv', delimiter= ',' )
truth_train_1 = np.genfromtxt( data_path + '/task1_' + str( fold ) + '_train_label.csv', delimiter= ',' )
feature_test_1 = np.genfromtxt( data_path + '/task1_' + str( fold ) + '_test_feature.csv', delimiter= ',' )
truth_test_1 = np.genfromtxt( data_path + '/task1_' + str( fold ) + '_test_label.csv', delimiter= ',' )
feature_train_2 = np.genfromtxt( data_path + '/task2_' + str( fold ) + '_train_feature.csv', delimiter= ',' )
truth_train_2 = np.genfromtxt( data_path + '/task2_' + str( fold ) + '_train_label.csv', delimiter= ',' )
feature_test_2 = np.genfromtxt( data_path + '/task2_' + str( fold ) + '_test_feature.csv', delimiter= ',' )
truth_test_2 = np.genfromtxt( data_path + '/task2_' + str( fold ) + '_test_label.csv', delimiter= ',' )
features_train = [ feature_train_0, feature_train_1, feature_train_2 ]
truths_train = [ truth_train_0, truth_train_1, truth_train_2 ]
features_test = [ feature_test_0, feature_test_1, feature_test_2 ]
truths_test = [ truth_test_0, truth_test_1, truth_test_2 ]
ret = run_mtl(
features_train= features_train,
truths_train= truths_train,
features_test= features_test,
truths_test= truths_test,
shared_nnet_spec= shared_nnet_spec,
individual_nnet_spec= individual_nnet_spec,
learning_rate= learning_rate,
batch_size= batch_size,
n_epochs= n_epochs,
dropout= dropout
)
truth0.extend( ret[ 0 ][ 0 ] )
pred0.extend( ret[ 0 ][ 1 ] )
truth1.extend( ret[ 1 ][ 0 ] )
pred1.extend( ret[ 1 ][ 1 ] )
truth2.extend( ret[ 2 ][ 0 ] )
pred2.extend( ret[ 2 ][ 1 ] )
print 'Task 1: Primary site - Macro F1 score', f1_score( truth0, pred0, average= 'macro' )
print 'Task 1: Primary site - Micro F1 score', f1_score( truth0, pred0, average= 'micro' )
print 'Task 2: Tumor laterality - Macro F1 score', f1_score( truth1, pred1, average= 'macro' )
    print 'Task 2: Tumor laterality - Micro F1 score', f1_score( truth1, pred1, average= 'micro' )
print 'Task 3: Histological grade - Macro F1 score', f1_score( truth2, pred2, average= 'macro' )
print 'Task 3: Histological grade - Micro F1 score', f1_score( truth2, pred2, average= 'micro' )
if __name__ == "__main__":
do_10_fold()
|
py
|
1a56ee00cedec52d54d602ec8c8d0e23a7399b3d
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def utalker():
pub = rospy.Publisher('chatter', String, queue_size=10)
# initialize ros node
rospy.init_node('talker', anonymous=True)
# set time loop rate
rate = rospy.Rate(10)
# Keep on Publishing the data
while not rospy.is_shutdown():
hello_str = "hello world %s" % rospy.get_time()
rospy.loginfo(hello_str)
pub.publish(hello_str)
rate.sleep()
if __name__ == '__main__':
try:
utalker()
except rospy.ROSInterruptException:
pass
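# Typical way to run this node (package name is an assumption, not from the
# original file): start `roscore`, then `rosrun my_package talker.py`, and
# verify the output with `rostopic echo /chatter`.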
|
py
|
1a56ee0dc7a2d23095a87b00f3886250037ad691
|
"""
Simple application that logs on to the APIC, pull all CDP neighbours,
and display in text table format
"""
import acitoolkit.acitoolkit as ACI
from acitoolkit import Node
from acitoolkit.aciConcreteLib import ConcreteCdp
from tabulate import tabulate
def main():
"""
Main show Cdps routine
:return: None
"""
# Take login credentials from the command line if provided
# Otherwise, take them from your environment variables file ~/.profile
    description = ('Simple application that logs on to the APIC '
                   'and displays all the CDP neighbours.')
creds = ACI.Credentials('apic', description)
args = creds.get()
# Login to APIC
session = ACI.Session(args.url, args.login, args.password)
resp = session.login()
if not resp.ok:
print('%% Could not login to APIC')
return
nodes = Node.get_deep(session, include_concrete=True)
cdps = []
for node in nodes:
node_concrete_cdp = node.get_children(child_type=ConcreteCdp)
for node_concrete_cdp_obj in node_concrete_cdp:
cdps.append(node_concrete_cdp_obj)
tables = ConcreteCdp.get_table(cdps)
output_list = []
for table in tables:
for table_data in table.data:
if table_data not in output_list:
output_list.append(table_data)
    print(tabulate(output_list, headers=["Node-ID",
                                         "Local Interface",
                                         "Neighbour Device",
                                         "Neighbour Platform",
                                         "Neighbour Interface"]))
if __name__ == '__main__':
main()
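# Credentials note (variable names assumed, not taken from this script):
# ACI.Credentials can also read the APIC endpoint from environment variables,
# commonly APIC_URL, APIC_LOGIN and APIC_PASSWORD, or from
# --url/--login/--password on the command line, so nothing is hard-coded here.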
|
py
|
1a56ee2a61fff920f8306cdc14d02f1894cc6008
|
import datetime
import pytz
import calendar
import os
cdt = datetime.datetime.now().date()
str_cdt=cdt.strftime("%d/%B")
print(str_cdt)
# store the date as a string
date_to_str=str(cdt)
# replace the '-' separators with spaces
clear_str=date_to_str.replace('-',' ')
# split into separate parts
slice_str=clear_str.split()
# build a new date with +2 days added (naive: breaks at month boundaries)
creat_date=datetime.datetime(int(slice_str[0]),int(slice_str[1]),int(slice_str[2])+2)
print("created date 17.00 : ",creat_date)
# via timedelta
td=datetime.timedelta(minutes=3)
print("subtract 3 minutes (td) from the created date : ",creat_date-td)
# parse a datetime from a string
strptime=datetime.datetime.strptime('1803191327','%d%m%y%H%M')
print("datetime parsed from a string",strptime)
# import sys
# print("sys.path : \n",sys.path)
# print("\n fp_date.__file__ : \n",pytz.__file__)
# print(fp_date.count_lines())
# print(fp_date.last_word_on_line(6666))
# print(fp_date.def_counter(fp_date.get_ip(1)))
print('_'*30)
format_date_on_log='17/May/2015:10:05:00'
# create a date from numeric values
cdt=datetime.datetime(2020,5,5)
print('date created from digits ',cdt)
format_calculate_date='0:00:16.603720'
print("format_date_on_log=",format_date_on_log)
time_obj=datetime.datetime.strptime(format_date_on_log,'%d/%B/%Y:%H:%M:%S')
print("time_obj",time_obj)
tz_minsk = pytz.timezone("Europe/Minsk")
tz_utc = pytz.timezone('UTC')
ct_minsk=datetime.datetime.now()
d_minsk=tz_minsk.localize(ct_minsk)
utc_minsk=d_minsk.astimezone(tz_utc)
print('_'*30)
print(utc_minsk)
print(d_minsk)
# current time in UTC
# ct_utc=datetime.datetime.utcnow()
# current local time in UTC+3
# ct_minsk=datetime.datetime.now()
#
# d_minsk=tz_minsk.localize(ct_minsk)
# d_utc=tz_utc.localize(ct_utc)
# d_minsk_utc=d_utc.astimezone(tz_utc)
# print("d_minsk in +3 Europe/Minsk format :",d_minsk)
# print("d_minsk in UTC format :",d_utc)
# print("d_minsk in d_minsk_utc format :",d_minsk_utc)
os.system('cls' if os.name=='nt' else 'clear')
|
py
|
1a56eeafb74526cef1efe99435cc3d9240c6fd1c
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division, unicode_literals
import json
import math
import time
from datetime import date, datetime, timedelta
from decimal import Decimal
from json.encoder import encode_basestring
from math import floor
from mo_dots import Data, FlatList, Null, NullType, SLOT, is_data, is_list
from mo_future import PYPY, binary_type, is_binary, is_text, long, sort_using_key, text, utf8_json_encoder, xrange
from mo_json import ESCAPE_DCT, float2json, scrub
from mo_logs import Except
from mo_logs.strings import quote
from mo_times import Timer
from mo_times.dates import Date
from mo_times.durations import Duration
json_decoder = json.JSONDecoder().decode
_get = object.__getattribute__
_ = Except
# THIS FILE EXISTS TO SERVE AS A FAST REPLACEMENT FOR JSON ENCODING
# THE DEFAULT JSON ENCODERS CAN NOT HANDLE A DIVERSITY OF TYPES *AND* BE FAST
#
# 1) WHEN USING cPython, WE HAVE NO COMPILER OPTIMIZATIONS: THE BEST STRATEGY IS TO
# CONVERT THE MEMORY STRUCTURE TO STANDARD TYPES AND SEND TO THE INSANELY FAST
# DEFAULT JSON ENCODER
# 2) WHEN USING PYPY, WE USE CLEAR-AND-SIMPLE PROGRAMMING SO THE OPTIMIZER CAN DO
# ITS JOB. ALONG WITH THE UnicodeBuilder WE GET NEAR C SPEEDS
COMMA = u","
QUOTE = u'"'
COLON = u":"
QUOTE_COLON = QUOTE + COLON
COMMA_QUOTE = COMMA + QUOTE
PRETTY_COMMA = u", "
PRETTY_COLON = u": "
if PYPY:
# UnicodeBuilder IS ABOUT 2x FASTER THAN list()
from __pypy__.builders import UnicodeBuilder
else:
class UnicodeBuilder(list):
def __init__(self, length=None):
list.__init__(self)
def build(self):
return u"".join(self)
append = UnicodeBuilder.append
_dealing_with_problem = False
def pypy_json_encode(value, pretty=False):
"""
pypy DOES NOT OPTIMIZE GENERATOR CODE WELL
"""
global _dealing_with_problem
if pretty:
return pretty_json(value)
try:
_buffer = UnicodeBuilder(2048)
_value2json(value, _buffer)
output = _buffer.build()
return output
except Exception as e:
# THE PRETTY JSON WILL PROVIDE MORE DETAIL ABOUT THE SERIALIZATION CONCERNS
from mo_logs import Log
if _dealing_with_problem:
Log.error("Serialization of JSON problems", e)
else:
Log.warning("Serialization of JSON problems", e)
_dealing_with_problem = True
try:
return pretty_json(value)
except Exception as f:
Log.error("problem serializing object", f)
finally:
_dealing_with_problem = False
class cPythonJSONEncoder(object):
def __init__(self, sort_keys=True):
object.__init__(self)
self.encoder = utf8_json_encoder
def encode(self, value, pretty=False):
if pretty:
return pretty_json(value)
try:
with Timer("scrub", too_long=0.1):
scrubbed = scrub(value)
param = {"size": 0}
with Timer("encode {{size}} characters", param=param, too_long=0.1):
output = text(self.encoder(scrubbed))
param["size"] = len(output)
return output
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def ujson_encode(value, pretty=False):
if pretty:
return pretty_json(value)
try:
scrubbed = scrub(value)
return ujson_dumps(scrubbed, ensure_ascii=False, sort_keys=True, escape_forward_slashes=False).decode('utf8')
except Exception as e:
from mo_logs.exceptions import Except
from mo_logs import Log
e = Except.wrap(e)
Log.warning("problem serializing {{type}}", type=text(repr(value)), cause=e)
raise e
def _value2json(value, _buffer):
try:
_class = value.__class__
if value is None:
append(_buffer, u"null")
return
elif value is True:
append(_buffer, u"true")
return
elif value is False:
append(_buffer, u"false")
return
type = value.__class__
if type is binary_type:
append(_buffer, QUOTE)
try:
v = value.decode('utf8')
except Exception as e:
problem_serializing(value, e)
for c in v:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is text:
append(_buffer, QUOTE)
for c in value:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE)
elif type is dict:
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif type is Data:
d = _get(value, SLOT) # MIGHT BE A VALUE NOT A DICT
_value2json(d, _buffer)
return
elif type in (int, long, Decimal):
append(_buffer, text(value))
elif type is float:
if math.isnan(value) or math.isinf(value):
append(_buffer, u'null')
else:
append(_buffer, float2json(value))
elif type in (set, list, tuple, FlatList):
_list2json(value, _buffer)
elif type is date:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is datetime:
append(_buffer, float2json(time.mktime(value.timetuple())))
elif type is Date:
append(_buffer, float2json(value.unix))
elif type is timedelta:
append(_buffer, float2json(value.total_seconds()))
elif type is Duration:
append(_buffer, float2json(value.seconds))
elif type is NullType:
append(_buffer, u"null")
elif is_data(value):
if not value:
append(_buffer, u"{}")
else:
_dict2json(value, _buffer)
return
elif hasattr(value, '__data__'):
d = value.__data__()
_value2json(d, _buffer)
elif hasattr(value, '__json__'):
j = value.__json__()
append(_buffer, j)
elif hasattr(value, '__iter__'):
_iter2json(value, _buffer)
else:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
def _list2json(value, _buffer):
if not value:
append(_buffer, u"[]")
else:
sep = u"["
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _iter2json(value, _buffer):
append(_buffer, u"[")
sep = u""
for v in value:
append(_buffer, sep)
sep = COMMA
_value2json(v, _buffer)
append(_buffer, u"]")
def _dict2json(value, _buffer):
try:
prefix = u"{\""
for k, v in value.items():
append(_buffer, prefix)
prefix = COMMA_QUOTE
if is_binary(k):
k = k.decode('utf8')
for c in k:
append(_buffer, ESCAPE_DCT.get(c, c))
append(_buffer, QUOTE_COLON)
_value2json(v, _buffer)
append(_buffer, u"}")
except Exception as e:
from mo_logs import Log
Log.error(text(repr(value)) + " is not JSON serializable", cause=e)
ARRAY_ROW_LENGTH = 80
ARRAY_ITEM_MAX_LENGTH = 30
ARRAY_MAX_COLUMNS = 10
INDENT = " "
def pretty_json(value):
try:
if value is False:
return "false"
elif value is True:
return "true"
elif is_data(value):
try:
items = sort_using_key(value.items(), lambda r: r[0])
values = [encode_basestring(k) + PRETTY_COLON + pretty_json(v) for k, v in items if v != None]
if not values:
return "{}"
elif len(values) == 1:
return "{" + values[0] + "}"
else:
return "{\n" + ",\n".join(indent(v) for v in values) + "\n}"
except Exception as e:
from mo_logs import Log
from mo_math import OR
if OR(not is_text(k) for k in value.keys()):
Log.error(
"JSON must have string keys: {{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
Log.error(
"problem making dict pretty: keys={{keys}}:",
keys=[k for k in value.keys()],
cause=e
)
elif value in (None, Null):
return "null"
elif value.__class__ in (binary_type, text):
if is_binary(value):
value = value.decode('utf8')
try:
if "\n" in value and value.strip():
return pretty_json({"$concat": value.split("\n"), "separator": "\n"})
else:
return quote(value)
except Exception as e:
from mo_logs import Log
try:
Log.note("try explicit convert of string with length {{length}}", length=len(value))
acc = [QUOTE]
for c in value:
try:
try:
c2 = ESCAPE_DCT[c]
except Exception:
c2 = c
c3 = text(c2)
acc.append(c3)
except BaseException:
pass
# Log.warning("odd character {{ord}} found in string. Ignored.", ord= ord(c)}, cause=g)
acc.append(QUOTE)
output = u"".join(acc)
Log.note("return value of length {{length}}", length=len(output))
return output
except BaseException as f:
Log.warning("can not convert {{type}} to json", type=f.__class__.__name__, cause=f)
return "null"
elif is_list(value):
if not value:
return "[]"
if ARRAY_MAX_COLUMNS == 1:
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
if len(value) == 1:
j = pretty_json(value[0])
if j.find("\n") >= 0:
return "[\n" + indent(j) + "\n]"
else:
return "[" + j + "]"
js = [pretty_json(v) for v in value]
max_len = max(*[len(j) for j in js])
if max_len <= ARRAY_ITEM_MAX_LENGTH and max(*[j.find("\n") for j in js]) == -1:
# ALL TINY VALUES
num_columns = max(1, min(ARRAY_MAX_COLUMNS, int(floor((ARRAY_ROW_LENGTH + 2.0) / float(max_len + 2))))) # +2 TO COMPENSATE FOR COMMAS
if len(js) <= num_columns: # DO NOT ADD \n IF ONLY ONE ROW
return "[" + PRETTY_COMMA.join(js) + "]"
if num_columns == 1: # DO NOT rjust IF THERE IS ONLY ONE COLUMN
return "[\n" + ",\n".join([indent(pretty_json(v)) for v in value]) + "\n]"
content = ",\n".join(
PRETTY_COMMA.join(j.rjust(max_len) for j in js[r:r + num_columns])
for r in xrange(0, len(js), num_columns)
)
return "[\n" + indent(content) + "\n]"
pretty_list = js
output = ["[\n"]
for i, p in enumerate(pretty_list):
try:
if i > 0:
output.append(",\n")
output.append(indent(p))
except Exception:
from mo_logs import Log
Log.warning("problem concatenating string of length {{len1}} and {{len2}}",
len1=len("".join(output)),
len2=len(p)
)
output.append("\n]")
try:
return "".join(output)
except Exception as e:
from mo_logs import Log
Log.error("not expected", cause=e)
elif hasattr(value, '__data__'):
d = value.__data__()
return pretty_json(d)
elif hasattr(value, '__json__'):
j = value.__json__()
if j == None:
return " null " # TODO: FIND OUT WHAT CAUSES THIS
return pretty_json(json_decoder(j))
elif scrub(value) is None:
return "null"
elif hasattr(value, '__iter__'):
return pretty_json(list(value))
elif hasattr(value, '__call__'):
return "null"
else:
try:
if int(value) == value:
return text(int(value))
except Exception:
pass
try:
if float(value) == value:
return text(float(value))
except Exception:
pass
return pypy_json_encode(value)
except Exception as e:
problem_serializing(value, e)
def problem_serializing(value, e=None):
"""
THROW ERROR ABOUT SERIALIZING
"""
from mo_logs import Log
try:
typename = type(value).__name__
except Exception:
typename = "<error getting name>"
try:
rep = text(repr(value))
except Exception as _:
rep = None
if rep == None:
Log.error(
"Problem turning value of type {{type}} to json",
type=typename,
cause=e
)
else:
Log.error(
"Problem turning value ({{value}}) of type {{type}} to json",
value=rep,
type=typename,
cause=e
)
def indent(value, prefix=INDENT):
try:
content = value.rstrip()
suffix = value[len(content):]
lines = content.splitlines()
return prefix + (u"\n" + prefix).join(lines) + suffix
except Exception as e:
raise Exception(u"Problem with indent of value (" + e.message + u")\n" + value)
def value_compare(a, b):
if a == None:
if b == None:
return 0
return -1
elif b == None:
return 1
if a > b:
return 1
elif a < b:
return -1
else:
return 0
def datetime2milli(d, type):
try:
if type == datetime:
diff = d - datetime(1970, 1, 1)
else:
diff = d - date(1970, 1, 1)
return long(diff.total_seconds()) * long(1000) + long(diff.microseconds / 1000)
except Exception as e:
problem_serializing(d, e)
def unicode_key(key):
"""
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
"""
if not isinstance(key, (text, binary_type)):
from mo_logs import Log
Log.error("{{key|quote}} is not a valid key", key=key)
return quote(text(key))
# OH HUM, cPython with uJSON, OR pypy WITH BUILTIN JSON?
# http://liangnuren.wordpress.com/2012/08/13/python-json-performance/
# http://morepypy.blogspot.ca/2011/10/speeding-up-json-encoding-in-pypy.html
if PYPY:
json_encoder = pypy_json_encode
else:
# from ujson import dumps as ujson_dumps
# json_encoder = ujson_encode
json_encoder = cPythonJSONEncoder().encode
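# Usage sketch (illustrative only): json_encoder is the module's entry point
# and transparently uses whichever backend was selected above, e.g.
#   json_encoder({"name": "x", "values": [1, 2.5]})
# returns a unicode JSON string; pass pretty=True for the indented variant.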
|
py
|
1a56effba0f4b75bf19dd7d0d0cb6418ced372ce
|
from spoof import *
from test_unit import TestUnit
|
py
|
1a56f0cf3118a1a40d2e8fcaedf00e40643b1d4e
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from copy import deepcopy
from nnunet.network_architecture.generic_UNet import Generic_UNet
import SimpleITK as sitk
import shutil
from batchgenerators.utilities.file_and_folder_operations import join
def split_4d_nifti(filename, output_folder):
img_itk = sitk.ReadImage(filename)
dim = img_itk.GetDimension()
file_base = filename.split("/")[-1]
if dim == 3:
shutil.copy(filename, join(output_folder, file_base[:-7] + "_0000.nii.gz"))
return
elif dim != 4:
raise RuntimeError("Unexpected dimensionality: %d of file %s, cannot split" % (dim, filename))
else:
img_npy = sitk.GetArrayFromImage(img_itk)
spacing = img_itk.GetSpacing()
origin = img_itk.GetOrigin()
direction = np.array(img_itk.GetDirection()).reshape(4,4)
# now modify these to remove the fourth dimension
spacing = tuple(list(spacing[:-1]))
origin = tuple(list(origin[:-1]))
direction = tuple(direction[:-1, :-1].reshape(-1))
for i, t in enumerate(range(img_npy.shape[0])):
img = img_npy[t]
img_itk_new = sitk.GetImageFromArray(img)
img_itk_new.SetSpacing(spacing)
img_itk_new.SetOrigin(origin)
img_itk_new.SetDirection(direction)
sitk.WriteImage(img_itk_new, join(output_folder, file_base[:-7] + "_%04.0d.nii.gz" % i))
def get_pool_and_conv_props_poolLateV2(patch_size, min_feature_map_size, max_numpool, spacing):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
initial_spacing = deepcopy(spacing)
reach = max(initial_spacing)
dim = len(patch_size)
num_pool_per_axis = get_network_numpool(patch_size, max_numpool, min_feature_map_size)
net_num_pool_op_kernel_sizes = []
net_conv_kernel_sizes = []
net_numpool = max(num_pool_per_axis)
current_spacing = spacing
for p in range(net_numpool):
reached = [current_spacing[i] / reach > 0.5 for i in range(dim)]
pool = [2 if num_pool_per_axis[i] + p >= net_numpool else 1 for i in range(dim)]
if all(reached):
conv = [3] * dim
else:
conv = [3 if not reached[i] else 1 for i in range(dim)]
net_num_pool_op_kernel_sizes.append(pool)
net_conv_kernel_sizes.append(conv)
current_spacing = [i * j for i, j in zip(current_spacing, pool)]
net_conv_kernel_sizes.append([3] * dim)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
return num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size, must_be_divisible_by
def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
dim = len(spacing)
current_spacing = deepcopy(list(spacing))
current_size = deepcopy(list(patch_size))
pool_op_kernel_sizes = []
conv_kernel_sizes = []
num_pool_per_axis = [0] * dim
while True:
# This is a problem because sometimes we have spacing 20, 50, 50 and we want to still keep pooling.
# Here we would stop however. This is not what we want! Fixed in get_pool_and_conv_propsv2
min_spacing = min(current_spacing)
valid_axes_for_pool = [i for i in range(dim) if current_spacing[i] / min_spacing < 2]
axes = []
for a in range(dim):
my_spacing = current_spacing[a]
partners = [i for i in range(dim) if current_spacing[i] / my_spacing < 2 and my_spacing / current_spacing[i] < 2]
if len(partners) > len(axes):
axes = partners
conv_kernel_size = [3 if i in axes else 1 for i in range(dim)]
# exclude axes that we cannot pool further because of min_feature_map_size constraint
#before = len(valid_axes_for_pool)
valid_axes_for_pool = [i for i in valid_axes_for_pool if current_size[i] >= 2*min_feature_map_size]
#after = len(valid_axes_for_pool)
#if after == 1 and before > 1:
# break
valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
if len(valid_axes_for_pool) == 0:
break
#print(current_spacing, current_size)
other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
pool_kernel_sizes = [0] * dim
for v in valid_axes_for_pool:
pool_kernel_sizes[v] = 2
num_pool_per_axis[v] += 1
current_spacing[v] *= 2
current_size[v] = np.ceil(current_size[v] / 2)
for nv in other_axes:
pool_kernel_sizes[nv] = 1
pool_op_kernel_sizes.append(pool_kernel_sizes)
conv_kernel_sizes.append(conv_kernel_size)
#print(conv_kernel_sizes)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
conv_kernel_sizes.append([3]*dim)
return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_pool_and_conv_props_v2(spacing, patch_size, min_feature_map_size, max_numpool):
"""
:param spacing:
:param patch_size:
:param min_feature_map_size: min edge length of feature maps in bottleneck
:return:
"""
dim = len(spacing)
current_spacing = deepcopy(list(spacing))
current_size = deepcopy(list(patch_size))
pool_op_kernel_sizes = []
conv_kernel_sizes = []
num_pool_per_axis = [0] * dim
kernel_size = [1] * dim
while True:
# exclude axes that we cannot pool further because of min_feature_map_size constraint
valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
if len(valid_axes_for_pool) < 1:
break
spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]
# find axis that are within factor of 2 within smallest spacing
min_spacing_of_valid = min(spacings_of_axes)
valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]
# max_numpool constraint
valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]
if len(valid_axes_for_pool) == 1:
if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
pass
else:
break
if len(valid_axes_for_pool) < 1:
break
# now we need to find kernel sizes
# kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
# factor 2 of min_spacing. Once they are 3 they remain 3
for d in range(dim):
if kernel_size[d] == 3:
continue
else:
if spacings_of_axes[d] / min(current_spacing) < 2:
kernel_size[d] = 3
other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]
pool_kernel_sizes = [0] * dim
for v in valid_axes_for_pool:
pool_kernel_sizes[v] = 2
num_pool_per_axis[v] += 1
current_spacing[v] *= 2
current_size[v] = np.ceil(current_size[v] / 2)
for nv in other_axes:
pool_kernel_sizes[nv] = 1
pool_op_kernel_sizes.append(pool_kernel_sizes)
conv_kernel_sizes.append(deepcopy(kernel_size))
#print(conv_kernel_sizes)
must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
patch_size = pad_shape(patch_size, must_be_divisible_by)
# we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
conv_kernel_sizes.append([3]*dim)
return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_shape_must_be_divisible_by(net_numpool_per_axis):
return 2 ** np.array(net_numpool_per_axis)
def pad_shape(shape, must_be_divisible_by):
"""
pads shape so that it is divisibly by must_be_divisible_by
:param shape:
:param must_be_divisible_by:
:return:
"""
if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
must_be_divisible_by = [must_be_divisible_by] * len(shape)
else:
assert len(must_be_divisible_by) == len(shape)
new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]
for i in range(len(shape)):
if shape[i] % must_be_divisible_by[i] == 0:
new_shp[i] -= must_be_divisible_by[i]
new_shp = np.array(new_shp).astype(int)
return new_shp
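# Worked example for pad_shape: shape [24, 504, 512] with must_be_divisible_by
# [8, 32, 32] gives [24, 512, 512] -- axes that are already divisible stay
# unchanged, the others are padded up to the next multiple.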
def get_network_numpool(patch_size, maxpool_cap=999, min_feature_map_size=4):
network_numpool_per_axis = np.floor([np.log(i / min_feature_map_size) / np.log(2) for i in patch_size]).astype(int)
network_numpool_per_axis = [min(i, maxpool_cap) for i in network_numpool_per_axis]
return network_numpool_per_axis
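# Worked example: get_network_numpool([24, 504, 512], min_feature_map_size=4)
# returns [2, 6, 7], i.e. floor(log2(size / 4)) pooling steps per axis, each
# capped at maxpool_cap.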
if __name__ == '__main__':
# trying to fix https://github.com/MIC-DKFZ/nnUNet/issues/261
median_shape = [24, 504, 512]
spacing = [5.9999094, 0.50781202, 0.50781202]
num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size, must_be_divisible_by = get_pool_and_conv_props_poolLateV2(median_shape, min_feature_map_size=4, max_numpool=999, spacing=spacing)
|
py
|
1a56f160767f49bf3abc44b4d1df014ca7ce0abe
|
# coding: utf-8
"""
Schemas
The CRM uses schemas to define how custom objects should store and represent information in the HubSpot CRM. Schemas define details about an object's type, properties, and associations. The schema can be uniquely identified by its **object type ID**. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from hubspot.crm.schemas.api_client import ApiClient
from hubspot.crm.schemas.exceptions import ApiTypeError, ApiValueError # noqa: F401
class PublicObjectSchemasApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def purge(self, object_type, **kwargs): # noqa: E501
"""purge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.purge(object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str object_type: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.purge_with_http_info(object_type, **kwargs) # noqa: E501
def purge_with_http_info(self, object_type, **kwargs): # noqa: E501
"""purge # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.purge_with_http_info(object_type, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str object_type: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["object_type"]
all_params.extend(["async_req", "_return_http_data_only", "_preload_content", "_request_timeout"])
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'" " to method purge" % key)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'object_type' is set
if self.api_client.client_side_validation and ("object_type" not in local_var_params or local_var_params["object_type"] is None): # noqa: E501 # noqa: E501
raise ApiValueError("Missing the required parameter `object_type` when calling `purge`") # noqa: E501
collection_formats = {}
path_params = {}
if "object_type" in local_var_params:
path_params["objectType"] = local_var_params["object_type"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(["*/*"]) # noqa: E501
# Authentication setting
auth_settings = ["hapikey"] # noqa: E501
return self.api_client.call_api(
"/crm/v3/schemas/{objectType}/purge",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get("_return_http_data_only"), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
|
py
|
1a56f316e0f99e5899c0fc6c681344ebca0e8aad
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
db = pymongo.MongoClient()
xianyu = db.python.xianyu
class ErshouPipeline(object):
def process_item(self, item, spider):
return item
class WriteMongo(object):
def process_item(self, item, spider):
data = dict(item)
xianyu.insert(data)
return item
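# To activate these pipelines, the project's settings.py needs an entry such as
# the following (module path assumed from the class names above):
#   ITEM_PIPELINES = {"ershou.pipelines.WriteMongo": 300}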
|
py
|
1a56f3a0a4a9b36e4020f260f5b79bb590b4ee1f
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, QBasicTimer
from PyQt5.QtGui import QPen, QColor, QBrush
import time
class Settings:
BLOCK_WIDTH = 10
BLOCK_HEIGHT = 10
NUM_BLOCKS_X = 50
NUM_BLOCKS_Y = 50
SCREEN_WIDTH = BLOCK_WIDTH * NUM_BLOCKS_X
SCREEN_HEIGHT = BLOCK_HEIGHT * NUM_BLOCKS_Y
class AppScene(QGraphicsScene):
SPEED = 50
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lines = []
self.create_ant()
self.draw_grid()
self.set_opacity(0.7)
self.ant_x = 50
self.timer = QBasicTimer()
#print(self.itemAt(Settings.NUM_BLOCKS_X // 2, Settings.NUM_BLOCKS_Y // 2, QTransform()))
#time.sleep(1)
#self.move_ant(Settings.BLOCK_HEIGHT*9, 0)
self.start()
def start(self):
self.addItem(self.ant)
self.timer.start(self.SPEED, self)
def timerEvent(self, event):
if event.timerId() == self.timer.timerId():
self.set_ant(self.ant_x % 500 - 250, -40)
self.ant_x += 10
def move_ant(self, dx, dy):
self.ant.moveBy(dx, dy)
def set_ant(self, x, y):
self.ant.setPos(x, y)
def create_ant(self):
self.ant = QGraphicsEllipseItem(Settings.SCREEN_WIDTH // 2, Settings.SCREEN_HEIGHT // 2, Settings.BLOCK_WIDTH, Settings.BLOCK_HEIGHT)
self.ant.setBrush(QBrush(QColor(0, 0, 255), Qt.SolidPattern))
def draw_grid(self):
width = Settings.SCREEN_WIDTH
height = Settings.SCREEN_HEIGHT
self.setSceneRect(0, 0, width, height)
pen = QPen(QColor(100, 100, 100), 1, Qt.SolidLine)
for x in range(0, Settings.NUM_BLOCKS_X + 1):
_x = x * Settings.BLOCK_WIDTH
self.lines.append(self.addLine(_x, 0, _x, height, pen))
for y in range(0, Settings.NUM_BLOCKS_Y + 1):
_y = y * Settings.BLOCK_HEIGHT
self.lines.append(self.addLine(0, _y, width, _y, pen))
    def set_visible(self, visible=True):
for line in self.lines:
line.setVisible(visible)
def draw_rect(self, col, row, color):
col = col * Settings.BLOCK_HEIGHT
row = row * Settings.BLOCK_WIDTH
rect = QGraphicsRectItem(row, col, Settings.BLOCK_WIDTH, Settings.BLOCK_HEIGHT)
rect.setBrush(QBrush(color, Qt.SolidPattern))
self.addItem(rect)
def delete_grid(self):
for line in self.lines:
self.removeItem(line)
del self.lines[:]
def set_opacity(self, opacity):
for line in self.lines:
line.setOpacity(opacity)
class Appview(QGraphicsView):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setFixedSize(Settings.SCREEN_WIDTH + 10, Settings.SCREEN_HEIGHT + 10)
def drawBackground(self, painter, rect):
super().drawBackground(painter, rect)
if __name__ == "__main__":
import sys
sys._excepthook = sys.excepthook
def exception_hook(exctype, value, traceback):
print(exctype, value, traceback)
sys._excepthook(exctype, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook
app = QApplication(sys.argv)
QScene = AppScene()
win = Appview()
win.setScene(QScene)
win.show()
sys.exit(app.exec_())
|
py
|
1a56f486e041e115309bb95b2b2fe9ac8babca60
|
import os
import shutil
from conans.client.source import complete_recipe_sources
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, PackageReference
from conans.util.files import rmdir
def _prepare_sources(cache, ref, remote_manager, loader, remotes):
conan_file_path = cache.package_layout(ref).conanfile()
conanfile = loader.load_class(conan_file_path)
complete_recipe_sources(remote_manager, cache, conanfile, ref, remotes)
return conanfile.short_paths
def _get_package_ids(cache, ref, package_ids):
if not package_ids:
return []
if package_ids is True:
packages = cache.package_layout(ref).packages()
if os.path.exists(packages):
package_ids = os.listdir(packages)
else:
package_ids = []
return package_ids
def cmd_copy(ref, user_channel, package_ids, cache, user_io, remote_manager, loader, remotes,
force=False):
"""
param package_ids: falsey = do not copy binaries; True = copy all existing packages; a list = copy only the given package ids
"""
# It is important to get the revision early, so "complete_recipe_sources" can
# get the right revision sources, not latest
layout = cache.package_layout(ref)
src_metadata = layout.load_metadata()
ref = ref.copy_with_rev(src_metadata.recipe.revision)
short_paths = _prepare_sources(cache, ref, remote_manager, loader, remotes)
package_ids = _get_package_ids(cache, ref, package_ids)
package_copy(ref, user_channel, package_ids, cache, user_io, short_paths, force)
def package_copy(src_ref, user_channel, package_ids, cache, user_io, short_paths=False,
force=False):
dest_ref = ConanFileReference.loads("%s/%s@%s" % (src_ref.name,
src_ref.version,
user_channel))
# Generate metadata
src_layout = cache.package_layout(src_ref, short_paths)
src_metadata = src_layout.load_metadata()
dst_layout = cache.package_layout(dest_ref, short_paths)
# Copy export
export_origin = src_layout.export()
if not os.path.exists(export_origin):
raise ConanException("'%s' doesn't exist" % str(src_ref))
export_dest = dst_layout.export()
if os.path.exists(export_dest):
if not force and not user_io.request_boolean("'%s' already exist. Override?"
% str(dest_ref)):
return
rmdir(export_dest)
shutil.copytree(export_origin, export_dest, symlinks=True)
user_io.out.info("Copied %s to %s" % (str(src_ref), str(dest_ref)))
export_sources_origin = src_layout.export_sources()
export_sources_dest = dst_layout.export_sources()
if os.path.exists(export_sources_dest):
rmdir(export_sources_dest)
shutil.copytree(export_sources_origin, export_sources_dest, symlinks=True)
user_io.out.info("Copied sources %s to %s" % (str(src_ref), str(dest_ref)))
# Copy packages
package_revisions = {} # To be stored in the metadata
for package_id in package_ids:
pref_origin = PackageReference(src_ref, package_id)
pref_dest = PackageReference(dest_ref, package_id)
package_path_origin = src_layout.package(pref_origin)
package_path_dest = dst_layout.package(pref_dest)
if os.path.exists(package_path_dest):
if not force and not user_io.request_boolean("Package '%s' already exist."
" Override?" % str(package_id)):
continue
rmdir(package_path_dest)
package_revisions[package_id] = (src_metadata.packages[package_id].revision,
src_metadata.recipe.revision)
shutil.copytree(package_path_origin, package_path_dest, symlinks=True)
user_io.out.info("Copied %s to %s" % (str(package_id), str(dest_ref)))
# Generate the metadata
with dst_layout.update_metadata() as metadata:
metadata.recipe.revision = src_metadata.recipe.revision
for package_id, (revision, recipe_revision) in package_revisions.items():
metadata.packages[package_id].revision = revision
metadata.packages[package_id].recipe_revision = recipe_revision
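
# Rough usage sketch (assumption: this module backs the Conan 1.x "conan copy"
# command; the exact CLI syntax is not confirmed by this file):
#
#     conan copy pkg/1.0@user/stable other_user/testing         # recipe only
#     conan copy pkg/1.0@user/stable other_user/testing --all   # all binary packages
#
# which corresponds to cmd_copy() receiving a falsey package_ids or True,
# respectively, as described in its docstring above.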
|
py
|
1a56f53a60712ed91642b4d9e20c2a791686c256
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="layout.xaxis.rangeselector.font",
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
py
|
1a56f5a86688c86da22af72da83870936fa88f07
|
import sys
import os.path
# Make the repository root importable so the "robot_localisation" package resolves.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from robot_localisation.main import main
main()
|
py
|
1a56f7b5ccf62d8a61f3e3b790fa787165541f9f
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._accounts_operations import AccountsOperations
from ._consumer_invitations_operations import ConsumerInvitationsOperations
from ._data_sets_operations import DataSetsOperations
from ._data_set_mappings_operations import DataSetMappingsOperations
from ._invitations_operations import InvitationsOperations
from ._operations import Operations
from ._shares_operations import SharesOperations
from ._provider_share_subscriptions_operations import ProviderShareSubscriptionsOperations
from ._share_subscriptions_operations import ShareSubscriptionsOperations
from ._consumer_source_data_sets_operations import ConsumerSourceDataSetsOperations
from ._synchronization_settings_operations import SynchronizationSettingsOperations
from ._triggers_operations import TriggersOperations
__all__ = [
'AccountsOperations',
'ConsumerInvitationsOperations',
'DataSetsOperations',
'DataSetMappingsOperations',
'InvitationsOperations',
'Operations',
'SharesOperations',
'ProviderShareSubscriptionsOperations',
'ShareSubscriptionsOperations',
'ConsumerSourceDataSetsOperations',
'SynchronizationSettingsOperations',
'TriggersOperations',
]
|
py
|
1a56f855c65ff2594bbe7fcab1c9c207c84e2ab6
|
"""converted from ..\fonts\50146chareuro_8x8in8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x78\x78\x30\x30\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x6c\x6c\x6c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x7e\xd0\x7c\x16\xfc\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\xc6\xcc\x18\x30\x66\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x38\x6c\x38\x76\xdc\xcc\x76\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x60\x60\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x30\x60\x60\x60\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x60\x30\x18\x18\x18\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x30\x30\xfc\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x7c\xc6\xce\xde\xf6\xe6\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x70\x30\x30\x30\x30\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\xcc\x0c\x38\x60\xcc\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\xcc\x0c\x38\x0c\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\xc0\xf8\x0c\x0c\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x38\x60\xc0\xf8\xcc\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\xcc\x0c\x18\x30\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\xcc\xcc\x78\xcc\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\xcc\xcc\x7c\x0c\x18\x70\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x30\x30\x00\x00\x30\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x00\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x30\x60\xc0\x60\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfc\x00\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x60\x30\x18\x0c\x18\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\xcc\x0c\x18\x30\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x7c\xc6\xce\xde\xdc\xc0\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x78\xcc\xcc\xfc\xcc\xcc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\x66\x66\x7c\x66\x66\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfe\x62\x68\x78\x68\x62\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfe\x62\x68\x78\x68\x60\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x3c\x66\xc0\xc0\xce\x66\x3e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xcc\xcc\xcc\xfc\xcc\xcc\xcc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\x30\x30\x30\x30\x30\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xf0\x60\x60\x60\x60\x66\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\x66\x66\x7c\x60\x60\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x7c\xc6\xc6\xc6\xd6\x7c\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfc\xb4\x30\x30\x30\x30\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xcc\xcc\xcc\xcc\xcc\x78\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc6\xc6\xc6\xd6\xfe\xee\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc6\xc6\x6c\x38\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xcc\xcc\xcc\x78\x30\x30\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xfe\xc6\x0c\x18\x30\x66\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\x60\x60\x60\x60\x60\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x78\x18\x18\x18\x18\x18\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x0c\x7c\xcc\x76\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xe0\x60\x60\x7c\x66\x66\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\xcc\xc0\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x1c\x0c\x0c\x7c\xcc\xcc\x76\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\xcc\xfc\xc0\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x3c\x66\x60\xf0\x60\x60\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xe0\x60\x6c\x76\x66\x66\xe6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x30\x00\x70\x30\x30\x30\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x0c\x00\x0c\x0c\x0c\xcc\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x70\x30\x30\x30\x30\x30\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xcc\xfe\xfe\xd6\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xdc\x66\x66\x66\x66\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\xcc\xcc\xcc\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xdc\x66\x66\x7c\x60\xf0\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xdc\x76\x66\x60\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc2\x38\x86\x7c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x10\x30\x7c\x30\x30\x34\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xcc\xcc\xcc\x78\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xd6\xfe\x6c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xcc\xcc\xcc\x7c\x0c\xf8\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\xfc\x98\x30\x64\xfc\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x1c\x30\x30\xe0\x30\x30\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x18\x18\x00\x18\x18\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\xe0\x30\x30\x1c\x30\x30\xe0\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
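
# Minimal rendering sketch (not part of the converted font data): each glyph is
# HEIGHT consecutive bytes in FONT, one byte per row, with the most significant
# bit as the leftmost pixel. This assumes the 8x16 cell layout described above.
def render_char(ch):
    index = ord(ch) - FIRST
    if not 0 <= index <= LAST - FIRST:
        raise ValueError("character outside font range")
    glyph = FONT[index * HEIGHT:(index + 1) * HEIGHT]
    rows = []
    for row_byte in glyph:
        rows.append("".join("#" if row_byte & (0x80 >> bit) else "." for bit in range(WIDTH)))
    return "\n".join(rows)

# Example: print(render_char("A")) prints an 8x16 block of '#' and '.' characters.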
|
py
|
1a56f9e74416561ec16571c406524963614d0d43
|
import cv2
cv2_data_dir = '/usr/local/lib/python3.7/dist-packages/cv2/data/'
face_cascade = cv2.CascadeClassifier(cv2_data_dir + 'haarcascade_frontalface_default.xml')
img = cv2.imread('faces.jpg', cv2.IMREAD_GRAYSCALE)
# detectMultiScale parameters: scaleFactor controls how much the image is shrunk
# at each pyramid level, and minNeighbors is how many overlapping detections a
# candidate needs before it is kept as a face.
scale_factor = 1.4
min_neighbours = 5
faces = face_cascade.detectMultiScale(img, scale_factor, min_neighbours)
print(faces)
for (x,y,w,h) in faces:
img = cv2.rectangle(img, (x,y), (x+w,y+h), (255, 255, 255), 2)
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
py
|
1a56fa8883465db598a0e33a13d87848d6b17be2
|
"""
Overrides the tf.keras.metrics.MeanIoU in order to use the one-hot encoded predictions instead of argmax.
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
class SoftmaxSingleMeanIoU(keras.metrics.MeanIoU):
"""
mIoU metric specific to only one class that uses softmax input (not argmax input).
"""
def __init__(self, label, name=None, dtype=None):
"""
:param name: name used for the metric
:param label: ID (integer) of the class used
"""
super(SoftmaxSingleMeanIoU, self).__init__(
2, name=name, dtype=dtype)
self.label = label
def update_state(self, y_true, y_pred, sample_weight=None):
# Change the softmax masks into the argmax equivalent to make the metric work
y_true = K.cast(K.equal(K.argmax(y_true), self.label), K.floatx())
y_pred = K.cast(K.equal(K.argmax(y_pred), self.label), K.floatx())
return super(SoftmaxSingleMeanIoU, self).update_state(
y_true, y_pred, sample_weight=sample_weight)
def result(self):
return super(SoftmaxSingleMeanIoU, self).result()
def reset_states(self):
super(SoftmaxSingleMeanIoU, self).reset_states()
def get_config(self):
return super(SoftmaxSingleMeanIoU, self).get_config()
class SoftmaxMeanIoU(keras.metrics.MeanIoU):
def __init__(self, num_classes, name=None, dtype=None):
"""
:param num_classes: number of available classes
:param name: name used for the metric
"""
super(SoftmaxMeanIoU, self).__init__(
num_classes, name=name, dtype=dtype)
def update_state(self, y_true, y_pred, sample_weight=None):
# Change the softmax masks into the argmax equivalent to make the metric work
y_true = K.argmax(y_true)
y_pred = K.argmax(y_pred)
return super(SoftmaxMeanIoU, self).update_state(
y_true, y_pred, sample_weight=sample_weight)
def result(self):
return super(SoftmaxMeanIoU, self).result()
def reset_states(self):
super(SoftmaxMeanIoU, self).reset_states()
def get_config(self):
return super(SoftmaxMeanIoU, self).get_config()
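
if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module): attach both
    # metrics to a tiny 3-class softmax segmentation head. The model below is
    # made up purely to show how the metrics are wired in.
    inputs = keras.Input(shape=(8, 8, 3))
    outputs = keras.layers.Conv2D(3, 1, activation="softmax")(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=[
            SoftmaxMeanIoU(num_classes=3, name="mean_iou"),
            SoftmaxSingleMeanIoU(label=1, name="class_1_iou"),
        ],
    )
    model.summary()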
|
py
|
1a56fae0948f0e108e53ba32e4b03092c424b5dd
|
import base64
import datetime
import plotly
import plotly.figure_factory as ff
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import json
import dashUI.run as run
import dashUI.run_OD as run_OD
iterationList = []
lossList = []
epochOfLossList = []
epochOfTop1ErrorList = []
epochOfMeanAPList = []
TrainSet_top1_error_valueList = []
ValidationSet_top1_error_valueList = []
TrainSet_mean_ap_valueList = []
ValidationSet_mean_ap_valueList = []
metricList = []
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
image_filename = 'C:/Users/930415/PycharmProjects/chadle/dashUI/icon.png' # replace with your own image
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
CLProjectNames = ','.join(run.CLProjectList)
ODProjectNames = ','.join(run.ODProjectList)
app.layout = html.Div([
html.Center(html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), width='80', height='70')),
# Title
html.H1('CHaDLE ',
style={
"font": 'verdana',
'textAlign': 'center',
'color': 'Black'
}
),
# Tabs for CL and OD
html.Div([
dcc.Tabs(id='AllTab', value='AllTab', children=[
# Classification Tab
dcc.Tab(label='Classification', value='CLTab', children=[
html.Div([html.Th(children='Available Classification Projects: ' + CLProjectNames, colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_CL', value='Animals', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_CL',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_CL',
options=[{'label': i, 'value': i} for i in ["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
],
style={'width': '25%', 'display': 'inline-block'}
),
html.Br(),
html.Br(),
html.Div([
html.Div([
html.Label('Image Width'),
dcc.Input(id='ImWidth_CL', value='100', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_CL', value='100', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_CL', value='3', type='number', min=0, step=1, ),
html.Label('Batch Size'),
dcc.Input(id='BatchSize_CL', value='1', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_CL', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_CL', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_CL', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_CL', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_CL', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_CL', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_CL', value='0,0', type='text'),
],
style={'width': '20%', 'display': 'inline-block'}),
html.Div([
html.Label('Augmentation Percentage'),
dcc.Input(id='AugmentationPercentage_CL', value='100', type='number', min=0, max=100, step=1, ),
html.Label('Rotation'),
dcc.Input(id='Rotation_CL', value='90', type='number', min=-180, max=180, step=90, ),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(id='mirror_CL', value='off', type='text', ),
html.Label('Brightness Variation'),
dcc.Input(id='BrightnessVariation_CL', value='1', type='number', min=-100, max=100, step=1, ),
html.Label('Brightness Variation Spot'),
dcc.Input(id='BrightnessVariationSpot_CL', value='1', type='number', min=-100, max=100,
step=1, ),
html.Label('Rotation Range (Step of 1)'),
dcc.Input(id='RotationRange_CL', value='1', type='number', min=1, step=1, ),
# html.Label('Ignore Direction'),
# dcc.Input(id='IgnoreDirection', value='false', type='text'),
# html.Label('Class IDs No Orientation Exist'),
# dcc.Input(id='ClassIDsNoOrientationExist', value='false', type='text'),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '20%', 'float': 'left', 'display': 'inline-block'}),
html.Div([html.H4('Evaluation'),
html.Div(id='evaluation_text_CL'),
dcc.Graph(id='evaluation_graph_CL'),
],
style={'width': '50%', 'float': 'right', }),
dcc.Interval(
id='interval-evaluation_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
]),
html.Br(),
html.Br(),
dcc.Loading(
id="loading-1",
type="default",
children=[html.Div(id="Training_loading_CL"),
html.Div(id="Evaluation_loading_CL")]
),
html.Div([
# html.Button(id='submit-button-state', n_clicks=0, children='Submit'),
html.Button(id='operation_button_CL', n_clicks=0, children='Start Training'),
# html.Button(id='train_button', n_clicks=0, children='Train'),
# html.Button(id='parameters_out_button', n_clicks=0, children='Output Parameters'),
html.Button(id='evaluation_button_CL', n_clicks=0, children='Evaluation'),
],
style={
'width': '70%', 'float': 'right',
}
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Div(id='Operation_output_CL'),
html.Div(id='makeJson_CL'),
# html.Div(id='output-state'),
# html.Div(id='Train Result'),
# html.Div(id='Evaluation Result'),
# Graph Plotter
html.Div([
html.H1('CHaDLE Training Monitor - Classification',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_CL',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_CL'),
dcc.Graph(id='top1_error_graph_CL'),
dcc.Interval(
id='interval_graph_CL',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
])
]),
# Object Detection Tab
dcc.Tab(label='Object Detection', value='ODTab', children=[
# Basic inputs
html.Div([html.Th(children='Available Object Detection Projects: ' + ODProjectNames, colSpan="1"),
html.Br(),
"Project Name:",
dcc.Input(
id='ProjectName_OD', value='NTBW', type='text'
),
html.Br(),
"Training Device:", dcc.RadioItems(
id='Runtime_OD',
options=[{'label': i, 'value': i} for i in ['cpu', 'gpu']],
value='cpu',
labelStyle={'display': 'inline-block'}
),
"Pretrained Model:", dcc.Dropdown(
id='PretrainedModel_OD',
options=[{'label': i, 'value': i} for i in ["classifier_enhanced", "classifier_compact"]],
value='classifier_compact'
),
], style={'width': '15%', 'display': 'inline-block'}),
html.Br(),
html.Br(),
# Parameters inputs
html.Div([
html.Div([
html.Label('Number of Classes'),
dcc.Input(id='NumClasses_OD', value='5', type='number', min=0, step=1, ),
html.Label('Image Width'),
dcc.Input(id='ImWidth_OD', value='960', type='number', min=0, step=1, ),
html.Label('Image Height'),
dcc.Input(id='ImHeight_OD', value='1024', type='number', min=0, step=1, ),
html.Label('Image Channel'),
dcc.Input(id='ImChannel_OD', value='3', type='number', min=0, step=1, ),
html.Label('Capacity'),
dcc.Input(id='Capacity_OD', value='medium', type='text', min=0, step=1, ),
html.Label('Instance Type'),
dcc.Input(id='InstanceType_OD', value='rectangle1', type='text', min=0, step=1, ),
html.Label('Training Percent'),
dcc.Input(id='TrainingPercent_OD', value='75', type='number', min=0, step=1, ),
html.Label('Validation Percent'),
dcc.Input(id='ValidationPercent_OD', value='15', type='number', min=0, step=1, ),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Batch Size'),
dcc.Input(id='BatchSize_OD', value='10', type='number', min=0, step=1, ),
html.Label('Initial Learning Rate'),
dcc.Input(id='InitialLearningRate_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Momentum'),
dcc.Input(id='Momentum_OD', value='0.09', type='number', min=0, step=0.00001, ),
html.Label('Number of Epochs'),
dcc.Input(id='NumEpochs_OD', value='2', type='number', min=0, step=1, ),
html.Label('Change Learning Rate @ Epochs'),
dcc.Input(id='ChangeLearningRateEpochs_OD', value='50,100', type='text'),
html.Label('Learning Rate Schedule'),
dcc.Input(id='lr_change_OD', value='0.01,0.05', type='text'),
html.Label('Regularisation Constant'),
dcc.Input(id='WeightPrior_OD', value='0.001', type='number', min=0, step=0.00001, ),
html.Label('Class Penalty'),
dcc.Input(id='class_penalty_OD', value='0,0', type='text'),
],
style={'width': '15%', 'display': 'inline-block'}),
html.Div([
html.Label('Augmentation Percentage'),
dcc.Input(id='AugmentationPercentage_OD', value='100', type='number', min=0, max=100, step=1, ),
html.Label('Rotation'),
dcc.Input(id='Rotation_OD', value='90', type='number', min=-180, max=180, step=90, ),
html.Label('Mirror (off,c,r,rc)'),
dcc.Input(id='mirror_OD', value='off', type='text', ),
html.Label('Brightness Variation'),
dcc.Input(id='BrightnessVariation_OD', value='0', type='number', min=-100, max=100, step=1, ),
html.Label('Brightness Variation Spot'),
dcc.Input(id='BrightnessVariationSpot_OD', value='0', type='number', min=-100, max=100,
step=1, ),
html.Label('Rotation Range (Step of 1)'),
dcc.Input(id='RotationRange_OD', value='10', type='number', min=1, step=1, ),
# html.Label('Ignore Direction'),
# dcc.Input(id='IgnoreDirection', value='false', type='text'),
# html.Label('Class IDs No Orientation Exist'),
# dcc.Input(id='ClassIDsNoOrientationExist', value='false', type='text'),
# html.Label('Class IDs No Orientation'),
# dcc.Input(id='ClassIDsNoOrientation', value='[]', type='text'),
],
style={'width': '15%', 'float': 'initial', 'display': 'inline-block',
}),
# Estimated Value show and input
html.Div([
html.H4('Halcon estimated values'),
html.P('Key in new desired value or leave it empty: '),
html.Br(),
html.Div([html.P('Min Level: '),
html.Div([html.Div(id='MinLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MinLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Max Level: '),
html.Div([html.Div(id='MaxLevel_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='MaxLevel_Input_OD', placeholder='Integer', type='number', min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Number of Subscales: '),
html.Div([html.Div(id='AnchorNumSubscales_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorNumSubscales_Input_OD', placeholder='Integer', type='number',
min=0,
step=1,
debounce=True), ]),
html.Br(),
html.Div([html.P('Anchor Aspect Ratios (min,max,mean,deviation): '),
html.Div([html.Div(id='AnchorAspectRatios_OD'), ],
style={"font": 'Helvetica', 'color': 'Blue'}),
dcc.Input(id='AnchorAspectRatios_Input_OD',
placeholder='List (0.720, 1.475, 2.125, 2.753)',
type='text', min=0, debounce=True, style={'width': '50%', }), ]),
# if user wanna change, type in the desired value.
# value = Best value among 4 read by halcon
# label the value,
],
style={'width': '40%', 'float': 'right'},
),
]),
html.Br(),
html.Br(),
html.Br(),
dcc.Loading(
id="loading_OD",
type="default",
children=[html.Div(id="Training_loading_OD"),
html.Div(id="Estimate_values_loading_OD")]
),
html.Br(),
# Buttons
html.Div([
html.Button(id='estimate_button_OD', n_clicks=0, children='Halcon Estimate Values'),
html.Button(id='operation_button_OD', n_clicks=0, children='Train'),
# html.Button(id='parameters_out_button', n_clicks=0, children='Output Parameters'),
html.Button(id='evaluation_button_OD', n_clicks=0, children='Evaluation'), ],
style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '100px',
}, ),
html.Div([html.Label(id='training_output_OD'), ], style={'display': 'flex',
'justify-content': 'center',
'align-items': 'center',
'height': '50px',
}, ),
# Evaluation Graph
html.Div([
html.Div([html.H2('Evaluation Graph Coming Soon...',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='evaluation_text_OD'),
dcc.Graph(id='evaluation_graph_OD'),
],
style={'width': '100%', 'float': 'initial'}),
dcc.Interval(
id='interval-evaluation_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
], ),
# OD training monitor graph plotter
html.Div([
html.H1('CHaDLE Training Monitor - Object Detection',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Black'
}),
html.Div(id='metrics_OD',
style={
"font": 'Helvetica',
'textAlign': 'center',
'color': 'Blue'
}),
dcc.Graph(id='iteration_loss_graph_OD'),
dcc.Graph(id='mean_ap_graph_OD'),
dcc.Interval(
id='interval_graph_OD',
interval=1 * 1000, # in milliseconds
n_intervals=0
)
])
]),
]),
]),
])
############################################################################################################
############################################## Call Backs ##################################################
############################################################################################################
############################################ Classification ##################################
@app.callback(Output('Operation_output_CL', 'children'),
Output("Training_loading_CL", "children"),
Input('operation_button_CL', 'n_clicks'),
State('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
)
def operation_CL(operation_button_CL, ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL, ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL, BrightnessVariation_CL,
BrightnessVariationSpot_CL,
RotationRange_CL):
ctx_operation_CL = dash.callback_context
if not ctx_operation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_CL.triggered[0]['prop_id'].split('.')[0]
print(button_id)
if button_id == 'Null':
raise PreventUpdate
else:
if button_id == 'operation_button_CL':
pre_process_param = run.pre_process_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL,
ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL)
DLModelHandle = pre_process_param[0][0]
DLDataset = pre_process_param[1][0]
TrainParam = pre_process_param[2][0]
run.training_CL(DLModelHandle, DLDataset, TrainParam)
metricList.append(DLModelHandle)
metricList.append(DLDataset)
metricList.append(TrainParam)
# run.training(templist[-3], templist[-2], templist[-1])
# run.training(templist[0], templist[1], templist[2])
else:
i = 1
# run.training(templist[-3], templist[-2], templist[-1])
return '', ''
@app.callback(Output('evaluation_graph_CL', 'figure'),
Output('Evaluation_loading_CL', 'children'),
Input('evaluation_button_CL', 'n_clicks'),
State('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
# State('RotationRange', 'value'),
# State('IgnoreDirection', 'value'),
)
def evaluation_CL(evaluation_button_CL, ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL, ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL, BrightnessVariation_CL,
BrightnessVariationSpot_CL,
):
z = [[0, 0], [0, 0]]
x = ['Confusion Matrix', 'Confusion Matrix']
y = ['Confusion Matrix', 'Confusion Matrix']
# change each element of z to type string for annotations
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap([[0, 0], [0, 0]], x=x, y=y, annotation_text=z_text, colorscale='Blues')
ctx_evaluation_CL = dash.callback_context
if not ctx_evaluation_CL.triggered:
button_id = 'Null'
else:
button_id = ctx_evaluation_CL.triggered[0]['prop_id'].split('.')[0]
if button_id == 'evaluation_button_CL':
print('Evaluation Started')
evaluationList = run.evaluation_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL,
ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL,
lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL,
BrightnessVariationSpot_CL,
)
z.clear()
x.clear()
y.clear()
z_text.clear()
confusion_matrix_List = evaluationList[0]
mean_precision = evaluationList[1][0]
mean_recall = evaluationList[2][0]
mean_f_score = evaluationList[3][0]
mean_precision = format(mean_precision, '.3f')
mean_recall = format(mean_recall, '.3f')
mean_f_score = format(mean_f_score, '.3f')
categories = run.getImageCategories(ProjectName_CL, 'Classification')[0]
labels = run.getImageCategories(ProjectName_CL, 'Classification')[1]
# threading.Thread(target=evaluation).start()
length = len(categories)
sublist = [confusion_matrix_List[i:i + length] for i in range(0, len(confusion_matrix_List), length)]
for i in sublist:
z.append(i)
for i in categories:
x.append(i)
y.append(i)
# change each element of z to type string for annotations
# z_text = [[str(y) for y in x] for x in z]
# set up figure
z_text = [[str(y) for y in x] for x in z]
fig = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text, colorscale='Blues')
# change each element of z to type string for annotations
# add title
fig.update_layout(
title_text='Mean Precision: ' + str(mean_precision) + '\n Mean Recall: ' + str(
mean_recall) + '\n Mean F Score: ' + str(mean_f_score),
)
# add custom xaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=0.5,
y=-0.15,
showarrow=False,
text="Ground Truth",
xref="paper",
yref="paper"))
# add custom yaxis title
fig.add_annotation(dict(font=dict(color="black", size=14),
x=-0.1,
y=0.5,
showarrow=False,
text="Prediction",
textangle=-90,
xref="paper",
yref="paper"))
# adjust margins to make room for yaxis title
fig.update_layout(margin=dict(t=50, l=200))
# add colorbar
fig['data'][0]['showscale'] = True
return fig, ' '
# Historical method to produce json file of input parameters
@app.callback(Output('makeJson_CL', 'children'),
# Input('parameters_out_button', 'n_clicks'),
Input('ProjectName_CL', 'value'),
State('Runtime_CL', 'value'),
State('PretrainedModel_CL', 'value'),
State('ImWidth_CL', 'value'),
State('ImHeight_CL', 'value'),
State('ImChannel_CL', 'value'),
State('BatchSize_CL', 'value'),
State('InitialLearningRate_CL', 'value'),
State('Momentum_CL', 'value'),
State('NumEpochs_CL', 'value'),
State('ChangeLearningRateEpochs_CL', 'value'),
State('lr_change_CL', 'value'),
State('WeightPrior_CL', 'value'),
State('class_penalty_CL', 'value'),
State('AugmentationPercentage_CL', 'value'),
State('Rotation_CL', 'value'),
State('mirror_CL', 'value'),
State('BrightnessVariation_CL', 'value'),
State('BrightnessVariationSpot_CL', 'value'),
State('RotationRange_CL', 'value'),
# State('IgnoreDirection', 'value'),
# State('ClassIDsNoOrientationExist', 'value'),
# State('ClassIDsNoOrientation', 'value'),
)
def makeJson_CL(ProjectName_CL, Runtime_CL, PretrainedModel_CL, ImWidth_CL, ImHeight_CL, ImChannel_CL,
BatchSize_CL, InitialLearningRate_CL, Momentum_CL, NumEpochs_CL,
ChangeLearningRateEpochs_CL, lr_change_CL, WeightPrior_CL,
class_penalty_CL, AugmentationPercentage_CL, Rotation_CL, mirror_CL,
BrightnessVariation_CL, BrightnessVariationSpot_CL,
RotationRange_CL):
ParameterDict = {'ProjectName': ProjectName_CL,
'Runtime': Runtime_CL, 'PretrainedModel': PretrainedModel_CL, 'ImWidth': ImWidth_CL,
'ImHeight': ImHeight_CL,
'ImChannel': ImChannel_CL,
'BatchSize': BatchSize_CL, 'InitialLearningRate': InitialLearningRate_CL, 'Momentum': Momentum_CL,
'NumEpochs': NumEpochs_CL,
'ChangeLearningRateEpochs': ChangeLearningRateEpochs_CL, 'lr_change': lr_change_CL,
'WeightPrior': WeightPrior_CL,
'class_penalty': class_penalty_CL, 'AugmentationPercentage': AugmentationPercentage_CL,
'Rotation': Rotation_CL, 'mirror': mirror_CL,
'BrightnessVariation': BrightnessVariation_CL,
'BrightnessVariationSpot': BrightnessVariationSpot_CL,
'RotationRange': RotationRange_CL, }
ctx = dash.callback_context
if not ctx.triggered:
button_id = 'Null'
else:
button_id = ctx.triggered[0]['prop_id'].split('.')[0]
if button_id == 'parameters_out_button':
with open('parameters_json.txt', 'w') as outfile:
json.dump(ParameterDict, outfile)
return 'To json done!'
@app.callback(Output('metrics_CL', 'children'),
Input('interval_graph_CL', 'n_intervals'))
def update_metrics_CL(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run.get_TrainInfo_CL()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update every time the interval gets fired.
@app.callback(Output('iteration_loss_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def iteration_loss_graph_CL(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
iteration_loss_graph_fig.update_layout(legend_title_text=123)
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# The graph can therefore be reset by deleting the Hdict files.
getTrainInfo = run.get_TrainInfo_CL()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
# if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('top1_error_graph_CL', 'figure'),
Input('interval_graph_CL', 'n_intervals'))
def top1_error_graph_CL(n):
# Top1 Error Graph configuration.
# Using plotly subplots. May consider changing to others.
top1_error_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
top1_error_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
top1_error_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
top1_error_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
top1_error_graph_fig.update_yaxes(title_text="Top1 Error", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# The graph can therefore be reset by deleting the Hdict files.
getEvaluationInfo = run.get_EvaluationInfo_CL()
if not getEvaluationInfo:
TrainSet_top1_error_valueList.clear()
ValidationSet_top1_error_valueList.clear()
epochOfTop1ErrorList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_top1_error_value = getEvaluationInfo[1]
ValidationSet_top1_error_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
if TrainSet_top1_error_value not in TrainSet_top1_error_valueList:
epochOfTop1ErrorList.append(epoch_EvaluationInfo)
TrainSet_top1_error_valueList.append(TrainSet_top1_error_value)
ValidationSet_top1_error_valueList.append(ValidationSet_top1_error_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': TrainSet_top1_error_valueList,
'name': 'Train Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
top1_error_graph_fig.append_trace({
'x': epochOfTop1ErrorList,
'y': ValidationSet_top1_error_valueList,
'name': 'Validation Set Top1_error',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return top1_error_graph_fig
############################################ Object Detection ################################
@app.callback(Output('MinLevel_OD', 'children'),
Output('MaxLevel_OD', 'children'),
Output('AnchorNumSubscales_OD', 'children'),
Output('AnchorAspectRatios_OD', 'children'),
Output('Estimate_values_loading_OD', 'children'),
Input('estimate_button_OD', 'n_clicks'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
)
def estimate_value_OD(estimate_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD, ):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_estimate_value_OD = dash.callback_context
if not ctx_estimate_value_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_estimate_value_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'estimate_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
MinLevel_OD = (estimate_value[1])
MaxLevel_OD = (estimate_value[2])
AnchorNumSubscales_OD = (estimate_value[3])
estimate_value = [round(number, 3) for number in estimate_value[4]]
print(estimate_value)
AnchorAspectRatios_OD_String = ", ".join(str(number) for number in estimate_value)
AnchorAspectRatios_OD = AnchorAspectRatios_OD_String
return MinLevel_OD, MaxLevel_OD, AnchorNumSubscales_OD, AnchorAspectRatios_OD, ' '
else:
return ' ', ' ', ' ', ' ', ' '
@app.callback(Output('training_output_OD', 'children'),
Output('Training_loading_OD', 'children'),
Input('operation_button_OD', 'n_clicks'),
# State('ProjectName_OD', 'value'),
State('ImWidth_OD', 'value'),
State('ImHeight_OD', 'value'),
State('TrainingPercent_OD', 'value'),
State('ValidationPercent_OD', 'value'),
State('MinLevel_Input_OD', 'value'),
State('MaxLevel_Input_OD', 'value'),
State('AnchorNumSubscales_Input_OD', 'value'),
State('AnchorAspectRatios_Input_OD', 'value'),
State('ImChannel_OD', 'value'),
State('PretrainedModel_OD', 'value'),
State('InstanceType_OD', 'value'),
State('NumClasses_OD', 'value'),
State('Capacity_OD', 'value'),
State('AugmentationPercentage_OD', 'value'),
State('Rotation_OD', 'value'),
State('mirror_OD', 'value'),
State('BrightnessVariation_OD', 'value'),
State('BrightnessVariationSpot_OD', 'value'),
State('RotationRange_OD', 'value'),
State('BatchSize_OD', 'value'),
State('InitialLearningRate_OD', 'value'),
State('Momentum_OD', 'value'),
State('NumEpochs_OD', 'value'),
State('ChangeLearningRateEpochs_OD', 'value'),
State('lr_change_OD', 'value'),
State('WeightPrior_OD', 'value'),
State('class_penalty_OD', 'value'),
)
def operation_OD(operation_button_OD, ImWidth_OD, ImHeight_OD, TrainingPercent_OD, ValidationPercent_OD,
MinLevel_Input_OD, MaxLevel_Input_OD, AnchorNumSubscales_Input_OD, AnchorAspectRatios_Input_OD,
ImChannel_OD, PretrainedModel_OD, InstanceType_OD, NumClasses_OD, Capacity_OD,
AugmentationPercentage_OD, Rotation_OD, mirror_OD, BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD, InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD):
Label_data_OD = 'C:/Users/930415/Desktop/Chadle_Projects/Chadle_Data/Object_Detection/NTBW_Image Analytics/NTBW_Initial_2.hdict'
ctx_operation_OD = dash.callback_context
if not ctx_operation_OD.triggered:
button_id = 'Null'
else:
button_id = ctx_operation_OD.triggered[0]['prop_id'].split('.')[0]
if button_id == 'operation_button_OD':
estimate_value = run_OD.estimate_values_OD(ImWidth_OD, ImHeight_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD)
DLDataset_preprocess = (estimate_value[0])
# If input empty, use Halcon estimate value.
if MinLevel_Input_OD:
MinLevel_OD = MinLevel_Input_OD
else:
MinLevel_OD = (estimate_value[1])
if MaxLevel_Input_OD:
MaxLevel_OD = MaxLevel_Input_OD
else:
MaxLevel_OD = (estimate_value[2])
if AnchorNumSubscales_Input_OD:
AnchorNumSubscales_OD = AnchorNumSubscales_Input_OD
else:
AnchorNumSubscales_OD = (estimate_value[3])
if AnchorAspectRatios_Input_OD:
AnchorAspectRatios_OD = AnchorAspectRatios_Input_OD.split(',')
else:
AnchorAspectRatios_OD = (estimate_value[4])
print(ImChannel_OD)
preprocess_OD = run_OD.preprocess_OD(ImWidth_OD, ImHeight_OD, ImChannel_OD, TrainingPercent_OD,
ValidationPercent_OD, Label_data_OD,
PretrainedModel_OD,
InstanceType_OD, DLDataset_preprocess,
MinLevel_OD, MaxLevel_OD,
AnchorNumSubscales_OD, AnchorAspectRatios_OD, NumClasses_OD, Capacity_OD)
DLDatasetFileName = preprocess_OD[0]
DLPreprocessParamFileName = preprocess_OD[1]
ModelFileName = preprocess_OD[2]
prepare_for_training_OD = run_OD.prepare_for_training_OD(AugmentationPercentage_OD, Rotation_OD, mirror_OD,
BrightnessVariation_OD, BrightnessVariationSpot_OD,
RotationRange_OD, BatchSize_OD,
InitialLearningRate_OD, Momentum_OD, NumEpochs_OD,
ChangeLearningRateEpochs_OD,
lr_change_OD, WeightPrior_OD, class_penalty_OD,
DLDatasetFileName, DLPreprocessParamFileName,
ModelFileName)
DLModelHandle = prepare_for_training_OD[0][0]
DLDataset = prepare_for_training_OD[1][0]
TrainParam = prepare_for_training_OD[2][0]
# Training
training_OD = run_OD.training_OD(DLDataset, DLModelHandle, TrainParam)
return ' ', ' '
# OD metrics and graphs
@app.callback(Output('metrics_OD', 'children'),
Input('interval_graph_OD', 'n_intervals'))
def update_metrics_OD(n):
# Indication Text configuration
# Extract data from Hdict and show as texts.
style = {'padding': '5px', 'fontSize': '16px'}
get_metrics = run_OD.get_TrainInfo_OD()
if get_metrics:
time_elapsed = get_metrics[0]
time_remaining = get_metrics[1]
epoch_metrics = get_metrics[2]
else:
time_elapsed = 0
time_remaining = 0
epoch_metrics = 0
return [
html.Span('Time Elapsed: {}'.format(str(datetime.timedelta(seconds=int(time_elapsed)))), style=style),
html.Span('Time Remaining: {}'.format(time_remaining), style=style),
html.Span('Current Epoch: {}'.format(epoch_metrics), style=style)
]
# Multiple components can update every time the interval gets fired.
@app.callback(Output('iteration_loss_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def iteration_loss_graph_OD(n):
# Loss Graph configuration
# Using plotly subplots. May consider changing to others.
iteration_loss_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1)
iteration_loss_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 50, 't': 80, 'autoexpand': False,
}
iteration_loss_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left', 'title': 'Loss-Iteration Graph'}
iteration_loss_graph_fig.update_layout(legend_title_text=123)
iteration_loss_graph_fig.update_xaxes(title_text="Iteration", row=1, col=1)
iteration_loss_graph_fig.update_yaxes(title_text="Loss", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# The graph can therefore be reset by deleting the Hdict files.
getTrainInfo = run_OD.get_TrainInfo_OD()
if not getTrainInfo:
iterationList.clear()
epochOfLossList.clear()
lossList.clear()
else:
epoch_TrainInfo = getTrainInfo[2]
loss = getTrainInfo[3]
iteration = getTrainInfo[4]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
if iteration not in iterationList:
epochOfLossList.append(epoch_TrainInfo)
lossList.append(loss)
iterationList.append(iteration)
# Add the values to graph and start plotting.
iteration_loss_graph_fig.append_trace({
'x': epochOfLossList,
'y': lossList,
'text': iterationList,
'name': 'iteration vs loss',
'mode': 'lines',
'type': 'scatter'
}, 1, 1)
return iteration_loss_graph_fig
@app.callback(Output('mean_ap_graph_OD', 'figure'),
Input('interval_graph_OD', 'n_intervals'))
def mean_ap_graph_OD(n):
# Mean AP Graph configuration.
# Using plotly subplots. May consider changing to others.
mean_ap_graph_fig = plotly.tools.make_subplots(rows=1, cols=1, vertical_spacing=1, )
mean_ap_graph_fig['layout']['margin'] = {
'l': 80, 'r': 80, 'b': 100, 't': 80, 'autoexpand': False,
}
mean_ap_graph_fig['layout']['legend'] = {'x': 0, 'y': 1, 'xanchor': 'left'}
mean_ap_graph_fig.update_xaxes(title_text="Epoch", row=1, col=1)
mean_ap_graph_fig.update_yaxes(title_text="Top1 Error", row=1, col=1)
# If the Hdict files do not exist, clear the graph and the lists used for plotting.
# The graph can therefore be reset by deleting the Hdict files.
getEvaluationInfo = run_OD.get_EvaluationInfo_OD()
if not getEvaluationInfo:
TrainSet_mean_ap_valueList.clear()
ValidationSet_mean_ap_valueList.clear()
epochOfMeanAPList.clear()
else:
epoch_EvaluationInfo = getEvaluationInfo[0]
TrainSet_mean_ap_value = getEvaluationInfo[1]
ValidationSet_mean_ap_value = getEvaluationInfo[2]
# Avoid duplicate output from Halcon.
# Interval for this web app is set to 1 sec. However, feedback from Halcon may take up to 5 secs.
# Using <in> with list, average time complexity: O(n)
# if TrainSet_mean_ap_value not in TrainSet_mean_ap_valueList:
epochOfMeanAPList.append(epoch_EvaluationInfo)
TrainSet_mean_ap_valueList.append(TrainSet_mean_ap_value)
ValidationSet_mean_ap_valueList.append(ValidationSet_mean_ap_value)
# Add the values to graph and start plotting.
# Two plots on the same graph.
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': TrainSet_mean_ap_valueList,
'name': 'Train Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
mean_ap_graph_fig.append_trace({
'x': epochOfMeanAPList,
'y': ValidationSet_mean_ap_valueList,
'name': 'Validation Set Mean AP',
'mode': 'lines+markers',
'type': 'scatter'
}, 1, 1)
return mean_ap_graph_fig
if __name__ == '__main__':
app.run_server(debug=True)
|
py
|
1a56fb37dedfb15ba22302990051f0cf2acef350
|
# flake8: noqa
# @TODO: code formatting issue for 20.07 release
from typing import List
import logging
from urllib.parse import quote_plus
from urllib.request import Request, urlopen
from catalyst import utils
from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
from catalyst.core.runner import IRunner
from catalyst.tools import settings
class TelegramLogger(Callback):
"""
Logger callback, translates ``runner.metric_manager`` to telegram channel.
"""
def __init__(
self,
token: str = None,
chat_id: str = None,
metric_names: List[str] = None,
log_on_stage_start: bool = True,
log_on_loader_start: bool = True,
log_on_loader_end: bool = True,
log_on_stage_end: bool = True,
log_on_exception: bool = True,
):
"""
Args:
token: telegram bot's token,
see https://core.telegram.org/bots
chat_id: Chat unique identifier
metric_names: List of metric names to log.
if none - logs everything.
log_on_stage_start: send notification on stage start
log_on_loader_start: send notification on loader start
log_on_loader_end: send notification on loader end
log_on_stage_end: send notification on stage end
log_on_exception: send notification on exception
"""
super().__init__(order=CallbackOrder.logging, node=CallbackNode.master)
# @TODO: replace this logic with global catalyst config at ~/.catalyst
self._token = token or settings.telegram_logger_token
self._chat_id = chat_id or settings.telegram_logger_chat_id
assert self._token is not None and self._chat_id is not None
self._base_url = (
f"https://api.telegram.org/bot{self._token}/sendMessage"
)
self.log_on_stage_start = log_on_stage_start
self.log_on_loader_start = log_on_loader_start
self.log_on_loader_end = log_on_loader_end
self.log_on_stage_end = log_on_stage_end
self.log_on_exception = log_on_exception
self.metrics_to_log = metric_names
def _send_text(self, text: str):
try:
url = (
f"{self._base_url}?"
f"chat_id={self._chat_id}&"
f"disable_web_page_preview=1&"
f"text={quote_plus(text, safe='')}"
)
request = Request(url)
urlopen(request) # noqa: S310
except Exception as e:
logging.getLogger(__name__).warning(f"telegram.send.error:{e}")
def on_stage_start(self, runner: IRunner):
"""Notify about starting a new stage."""
if self.log_on_stage_start:
text = f"{runner.stage_name} stage was started"
self._send_text(text)
def on_loader_start(self, runner: IRunner):
"""Notify about starting running the new loader."""
if self.log_on_loader_start:
text = (
f"{runner.loader_name} {runner.global_epoch} epoch has started"
)
self._send_text(text)
def on_loader_end(self, runner: IRunner):
"""Translate ``runner.metric_manager`` to telegram channel."""
if self.log_on_loader_end:
metrics = runner.loader_metrics
if self.metrics_to_log is None:
metrics_to_log = sorted(metrics.keys())
else:
metrics_to_log = self.metrics_to_log
rows: List[str] = [
f"{runner.loader_name} {runner.global_epoch}"
f" epoch was finished:"
]
for name in metrics_to_log:
if name in metrics:
rows.append(utils.format_metric(name, metrics[name]))
text = "\n".join(rows)
self._send_text(text)
def on_stage_end(self, runner: IRunner):
"""Notify about finishing a stage."""
if self.log_on_stage_end:
text = f"{runner.stage_name} stage was finished"
self._send_text(text)
def on_exception(self, runner: IRunner):
"""Notify about raised ``Exception``."""
if self.log_on_exception:
exception = runner.exception
if utils.is_exception(exception) and not isinstance(
exception, KeyboardInterrupt
):
text = (
f"`{type(exception).__name__}` exception was raised:\n"
f"{exception}"
)
self._send_text(text)
__all__ = ["TelegramLogger"]
|
py
|
1a56fb3d8a1702038502ab6f9503a643515a2e78
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from datetime import date
from datetime import datetime
from freezegun import freeze_time
from mock import patch
from ggrc.app import db
from ggrc.notifications import common
from ggrc.models import Notification
from ggrc.models import Person
from ggrc.models import all_models
from ggrc_workflows.models import Cycle
from integration.ggrc import TestCase
from integration.ggrc.access_control import acl_helper
from integration.ggrc.api_helper import Api
from integration.ggrc.generator import ObjectGenerator
from integration.ggrc_workflows.generator import WorkflowsGenerator
class TestOneTimeWorkflowNotification(TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
super(TestOneTimeWorkflowNotification, self).setUp()
self.api = Api()
self.wf_generator = WorkflowsGenerator()
self.object_generator = ObjectGenerator()
self.random_objects = self.object_generator.generate_random_objects()
self.random_people = self.object_generator.generate_random_people(
user_role="Administrator"
)
self.create_test_cases()
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
def test_one_time_wf_activate(self):
def get_person(person_id):
return db.session.query(Person).filter(Person.id == person_id).one()
with freeze_time("2015-04-10"):
_, wf = self.wf_generator.generate_workflow(self.one_time_workflow_1)
_, cycle = self.wf_generator.generate_cycle(wf)
self.wf_generator.activate_workflow(wf)
person_2 = get_person(self.random_people[2].id)
with freeze_time("2015-04-11"):
_, notif_data = common.get_daily_notifications()
self.assertIn(person_2.email, notif_data)
self.assertIn("cycle_started", notif_data[person_2.email])
self.assertIn(cycle.id, notif_data[person_2.email]["cycle_started"])
self.assertIn("my_tasks",
notif_data[person_2.email]["cycle_data"][cycle.id])
person_1 = get_person(self.random_people[0].id)
    with freeze_time("2015-05-03"):  # two days before due date
_, notif_data = common.get_daily_notifications()
self.assertIn(person_1.email, notif_data)
self.assertNotIn("due_in", notif_data[person_1.email])
self.assertNotIn("due_today", notif_data[person_1.email])
    with freeze_time("2015-05-04"):  # one day before due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[person_1.email]["due_in"]), 1)
with freeze_time("2015-05-05"): # due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[person_1.email]["due_today"]), 1)
@patch("ggrc.notifications.common.send_email")
def test_one_time_wf_activate_single_person(self, mock_mail):
with freeze_time("2015-04-10"):
user = "[email protected]"
_, wf = self.wf_generator.generate_workflow(
self.one_time_workflow_single_person)
_, cycle = self.wf_generator.generate_cycle(wf)
self.wf_generator.activate_workflow(wf)
with freeze_time("2015-04-11"):
_, notif_data = common.get_daily_notifications()
self.assertIn("cycle_started", notif_data[user])
self.assertIn(cycle.id, notif_data[user]["cycle_started"])
self.assertIn("my_tasks", notif_data[user]["cycle_data"][cycle.id])
self.assertIn("cycle_tasks", notif_data[user]["cycle_data"][cycle.id])
self.assertIn(
"my_task_groups", notif_data[user]["cycle_data"][cycle.id])
self.assertIn("cycle_url", notif_data[user]["cycle_started"][cycle.id])
cycle = Cycle.query.get(cycle.id)
cycle_data = notif_data[user]["cycle_data"][cycle.id]
for task in cycle.cycle_task_group_object_tasks:
self.assertIn(task.id, cycle_data["my_tasks"])
self.assertIn(task.id, cycle_data["cycle_tasks"])
self.assertIn("title", cycle_data["my_tasks"][task.id])
self.assertIn("title", cycle_data["cycle_tasks"][task.id])
self.assertIn("cycle_task_url", cycle_data["cycle_tasks"][task.id])
with freeze_time("2015-05-03"): # two days before due date
_, notif_data = common.get_daily_notifications()
self.assertIn(user, notif_data)
self.assertNotIn("due_in", notif_data[user])
self.assertNotIn("due_today", notif_data[user])
with freeze_time("2015-05-04"): # one day before due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[user]["due_in"]), 2)
with freeze_time("2015-05-05"): # due date
_, notif_data = common.get_daily_notifications()
self.assertEqual(len(notif_data[user]["due_today"]), 2)
common.send_daily_digest_notifications()
self.assertEqual(mock_mail.call_count, 1)
def create_test_cases(self):
def person_dict(person_id):
return {
"href": "/api/people/%d" % person_id,
"id": person_id,
"type": "Person"
}
role_id = all_models.AccessControlRole.query.filter(
all_models.AccessControlRole.name == "Task Assignees",
all_models.AccessControlRole.object_type == "TaskGroupTask",
).one().id
self.one_time_workflow_1 = {
"title": "one time test workflow",
"description": "some test workflow",
"notify_on_change": True,
# admin will be current user with id == 1
"task_groups": [{
"title": "one time task group",
"contact": person_dict(self.random_people[2].id),
"task_group_tasks": [{
"title": "task 1",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[0].id)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}, {
"title": "task 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[1].id)
],
"start_date": date(2015, 5, 4),
"end_date": date(2015, 5, 7),
}],
"task_group_objects": self.random_objects[:2]
}, {
"title": "another one time task group",
"contact": person_dict(self.random_people[2].id),
"task_group_tasks": [{
"title": "task 1 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[0].id)
],
"start_date": date(2015, 5, 8), # friday
"end_date": date(2015, 5, 12),
}, {
"title": "task 2 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, self.random_people[2].id)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}],
"task_group_objects": []
}]
}
user = Person.query.filter(Person.email == "[email protected]").one().id
self.one_time_workflow_single_person = {
"title": "one time test workflow",
"notify_on_change": True,
"description": "some test workflow",
# admin will be current user with id == 1
"task_groups": [{
"title": "one time task group",
"contact": person_dict(user),
"task_group_tasks": [{
"title": u"task 1 \u2062 WITH AN UMBRELLA ELLA ELLA. \u2062",
"description": "some task. ",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}, {
"title": "task 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 4),
"end_date": date(2015, 5, 7),
}],
"task_group_objects": self.random_objects[:2]
}, {
"title": "another one time task group",
"contact": person_dict(user),
"task_group_tasks": [{
"title": u"task 1 \u2062 WITH AN UMBRELLA ELLA ELLA. \u2062",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 8), # friday
"end_date": date(2015, 5, 12),
}, {
"title": "task 2 in tg 2",
"description": "some task",
"access_control_list": [
acl_helper.get_acl_json(role_id, user)
],
"start_date": date(2015, 5, 1), # friday
"end_date": date(2015, 5, 5),
}],
"task_group_objects": []
}]
}
|
py
|
1a56fbb746d4119bf0ec7336099c41079b5f1db6
|
#!/usr/bin/env python3
import argparse
import logging
from db_test_meter.database import Database
from db_test_meter.util import init_logger, collect_user_input, AppConfig
def create_db(db: Database) -> None:
"""
Utility to create the db and table for the sync check
    :param db: Database connection wrapper used to run the DDL statements
    :return: None
"""
try:
log.debug(f'creating database {AppConfig.TEST_DB_NAME}')
db.run_query(f"DROP DATABASE IF EXISTS {AppConfig.TEST_DB_NAME}")
db.run_query(f"CREATE DATABASE IF NOT EXISTS {AppConfig.TEST_DB_NAME}")
log.debug(f'creating table {AppConfig.TEST_DB_TABLE}')
db.run_query(
f"CREATE TABLE {AppConfig.TEST_DB_NAME}.{AppConfig.TEST_DB_TABLE} (`test_run_id` varchar(50) NOT NULL, `index_id` int(10) unsigned NOT NULL, `created` int(8) NOT NULL)")
print(f'Database {AppConfig.TEST_DB_NAME} created')
print(f'Table {AppConfig.TEST_DB_NAME}.{AppConfig.TEST_DB_TABLE} created')
except Exception as e:
print(f'There was an error: {e}')
parser = argparse.ArgumentParser(
'simple utility to create the db and table used by failover_test.py. Usage: ./create_failover_sync_db.py')
parser.add_argument('--debug', action='store_true')
init_logger(debug=parser.parse_args().debug)
log = logging.getLogger()
print('This will destroy and recreate sync database and tracking table')
if (input("enter y to continue, n to exit [n]: ") or 'n').lower() == 'y':
db_connection_metadata = collect_user_input()
db = Database(db_connection_metadata)
create_db(db)
else:
print('exiting...')
|
py
|
1a56fc82be2d8e04021709fa01f21944ba2cca57
|
# -*- coding: utf-8 -*-
# Data paths
DATA_DIR = "~/data/"
# Neo4J connectors
ADMIN_NAME = "neo4j"
ADMIN_PASS = "neo4jbinder"
URL = "bolt://localhost:7687"
ENCODING = 'ISO-8859-1'
ENGINE = "python"
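# Usage sketch (assumes the official `neo4j` Python driver; the import and the
# call below are illustrative and not part of this config module):
#
#     from neo4j import GraphDatabase
#     driver = GraphDatabase.driver(URL, auth=(ADMIN_NAME, ADMIN_PASS))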
|
py
|
1a56fd023572c56a190b4850eb9c94033636ac1f
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1. / np.sqrt(fan_in)
return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=400, fc2_units=300, batch_norm=True):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
            batch_norm (bool): Whether to apply batch normalization after each hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.batch_norm = batch_norm
self.fc1 = nn.Linear(state_size, fc1_units)
if batch_norm:
self.bn1 = nn.BatchNorm1d(fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
if batch_norm:
self.bn2 = nn.BatchNorm1d(fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
if self.batch_norm:
x = F.relu(self.bn1(self.fc1(state)))
x = F.relu(self.bn2(self.fc2(x)))
else:
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return torch.tanh(self.fc3(x))
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=400, fc2_units=300, batch_norm=True):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
            batch_norm (bool): Whether to apply batch normalization after the first hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.batch_norm = batch_norm
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.bn1 = nn.BatchNorm1d(fcs1_units)
self.fc2 = nn.Linear(fcs1_units+action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-3e-3, 3e-3)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
if self.batch_norm:
xs = F.relu(self.bn1(self.fcs1(state)))
else:
xs = F.relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
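# Minimal usage sketch (not part of the original module; the 33-dimensional
# state and 4-dimensional action space below are illustrative assumptions).
if __name__ == "__main__":
    state_size, action_size, seed = 33, 4, 0
    actor = Actor(state_size, action_size, seed)
    critic = Critic(state_size, action_size, seed)
    states = torch.randn(8, state_size)   # dummy batch of 8 states
    actions = actor(states)               # tanh keeps actions in [-1, 1]
    q_values = critic(states, actions)    # shape: (8, 1)
    print(actions.shape, q_values.shape)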
|
py
|
1a56fe13d403456e308ecfea11284c04a9f89056
|
from functools import reduce
from operator import __mul__
import pytest
from treevalue.tree import func_treelize, TreeValue, method_treelize, classmethod_treelize, delayed
# noinspection DuplicatedCode
@pytest.mark.unittest
class TestTreeFuncFunc:
def test_tree_value_type(self):
class _MyTreeValue(TreeValue):
pass
@func_treelize(return_type=_MyTreeValue)
def ssum(*args):
return sum(args)
t1 = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
tr1 = ssum(t1, t2)
assert tr1 != TreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr1 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert isinstance(tr1, _MyTreeValue)
assert isinstance(tr1.x, _MyTreeValue)
@func_treelize(return_type=_MyTreeValue)
def ssum2(*args):
return sum(args), reduce(__mul__, args, 1)
tr2 = ssum2(t1, t2)
assert tr2 == _MyTreeValue({'a': (12, 11), 'b': (24, 44), 'x': {'c': (36, 99), 'd': (48, 176)}})
@func_treelize(return_type=_MyTreeValue, rise=True)
def ssum3(*args):
return sum(args), reduce(__mul__, args, 1)
tr3, tr4 = ssum3(t1, t2)
assert tr3 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr4 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})
@func_treelize(return_type=_MyTreeValue, subside=True, rise=dict(template=(None, None)))
def ssum4(args):
return sum(args), reduce(__mul__, args, 1)
tr5, tr6 = ssum4([t1, t2])
assert tr5 == _MyTreeValue({'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}})
assert tr6 == _MyTreeValue({'a': 11, 'b': 44, 'x': {'c': 99, 'd': 176}})
@func_treelize()
def ssum5(a, b, c):
return a + b * c
t3 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43, 'd': 24}})
assert ssum5(1, c=3, b=5) == 16
assert ssum5(t2, c=t1, b=t3) == TreeValue({
'a': 42,
'b': 46,
'x': {
'c': 162,
'd': 140,
}
})
assert ssum5(t2, c=2, b=t3) == TreeValue({
'a': 73,
'b': 46,
'x': {
'c': 119,
'd': 92,
}
})
@func_treelize('outer', missing=lambda: 1)
def ssum6(a, b, c):
return a + b * c
t4 = TreeValue({'a': 31, 'b': 12, 'x': {'c': 43}})
with pytest.raises(KeyError):
ssum5(t2, c=2, b=t4)
assert ssum6(t2, c=2, b=t4) == TreeValue({
'a': 73,
'b': 46,
'x': {
'c': 119,
'd': 46,
}
})
@func_treelize('left')
def ssum7(a, b, c):
return a + b * c
with pytest.raises(KeyError):
ssum7(t2, c=2, b=t4)
@func_treelize(inherit=False)
def ssum8(a, b, c):
return a + b * c
with pytest.raises(TypeError):
ssum8(t2, c=2, b=t1)
def test_tree_value_type_none(self):
@func_treelize(return_type=None)
def ssum(*args):
return sum(args)
t1 = TreeValue({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeValue({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 44}})
tr1 = ssum(t1, t2)
assert tr1 is None
def test_tree_value_type_invalid(self):
class _MyTreeValue:
pass
with pytest.raises(TypeError):
# noinspection PyTypeChecker
@func_treelize(return_type=_MyTreeValue)
def ssum(*args):
return sum(args)
with pytest.raises(TypeError):
# noinspection PyTypeChecker
@func_treelize(return_type=233)
def ssum(*args):
return sum(args)
def test_method_treelize(self):
class TreeNumber(TreeValue):
@method_treelize()
def _attr_extern(self, key):
return getattr(self, key)
@method_treelize('outer', missing=0)
def __add__(self, other):
return self + other
@method_treelize('outer', missing=0)
def __radd__(self, other):
return other + self
@method_treelize('outer', missing=0)
def __sub__(self, other):
return self - other
@method_treelize('outer', missing=0)
def __rsub__(self, other):
return other - self
@method_treelize()
def __pos__(self):
return +self
@method_treelize()
def __neg__(self):
return -self
@method_treelize()
def __call__(self, *args, **kwargs):
return self(*args, **kwargs)
@method_treelize(return_type=TreeValue)
def damn_it(self, x):
return self + x
t1 = TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
t2 = TreeNumber({'a': 11, 'b': 22, 'x': {'c': 33, 'd': 5}})
assert (t1 + t2 + 1) == TreeNumber({'a': 13, 'b': 25, 'x': {'c': 37, 'd': 10}})
assert (t1 - t2) == TreeNumber({'a': -10, 'b': -20, 'x': {'c': -30, 'd': -1}})
assert (1 - t2) == TreeNumber({'a': -10, 'b': -21, 'x': {'c': -32, 'd': -4}})
assert t1.damn_it(2) == TreeValue({'a': 3, 'b': 4, 'x': {'c': 5, 'd': 6}})
class P:
def __init__(self, value):
self.__value = value
@property
def value(self):
return self.__value
def vv(self):
return self.__value + 1
ttt = TreeNumber({"a": P(1), "b": P(2), "x": {"c": P(3), "d": P(4)}})
assert ttt.value == TreeNumber({'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4}})
assert ttt.vv() == TreeNumber({'a': 2, 'b': 3, 'x': {'c': 4, 'd': 5}})
with pytest.warns(UserWarning):
class MyTreeValue(TreeValue):
@method_treelize(self_copy=True, rise=True)
def __iadd__(self, other):
return self + other
def test_classmethod_treelize(self):
class TestUtils:
@classmethod
@classmethod_treelize('outer', missing=0, return_type=TreeValue)
def add(cls, a, b):
return cls, a + b
@classmethod
@classmethod_treelize(return_type=TreeValue)
def add2(cls, a, b):
return cls, a + b
assert TestUtils.add(1, 2) == (TestUtils, 3)
assert TestUtils.add(TreeValue({'a': 1, 'b': 2}), 2) == TreeValue({'a': (TestUtils, 3), 'b': (TestUtils, 4)})
assert TestUtils.add2(TreeValue({'a': 1, 'b': 2}), TreeValue({'a': 12, 'b': 22})) == TreeValue(
{'a': (TestUtils, 13), 'b': (TestUtils, 24)})
class MyTreeValue(TreeValue):
@classmethod
@classmethod_treelize()
def plus(cls, x, y):
return x + y
assert MyTreeValue.plus(TreeValue({'a': 1, 'b': 2}), 2) == MyTreeValue({'a': 3, 'b': 4})
def test_missing(self):
@func_treelize(mode='outer', missing=lambda: [])
def append(arr: list, *args):
for item in args:
if item:
arr.append(item)
return arr
t0 = TreeValue({})
t1 = TreeValue({'a': 2, 'b': 7, 'x': {'c': 4, 'd': 9}})
t2 = TreeValue({'a': 4, 'b': 48, 'x': {'c': -11, 'd': 54}})
t3 = TreeValue({'a': 9, 'b': -12, 'x': {'c': 3, 'd': 4}})
assert append(t0, t1, t2, t3) == TreeValue({
'a': [2, 4, 9],
'b': [7, 48, -12],
'x': {
'c': [4, -11, 3],
'd': [9, 54, 4],
}
})
t0 = TreeValue({})
t1 = TreeValue({'a': 2, 'x': {'c': 4, 'd': 9}})
t2 = TreeValue({'a': 4, 'b': 48, 'x': {'d': 54}})
t3 = TreeValue({'b': -12, 'x': 7, 'y': {'e': 3, 'f': 4}})
assert append(t0, t1, t2, t3) == TreeValue({
'a': [2, 4],
'b': [48, -12],
'x': {
'c': [4, 7],
'd': [9, 54, 7],
},
'y': {
'e': [3],
'f': [4],
},
})
def test_delay_support(self):
@func_treelize(return_type=TreeValue)
def f(x, y, z):
return x + y * 2 + z * 3
t1 = TreeValue({
'a': 1,
'b': delayed(lambda x: x ** 2, 3),
'c': {'x': 2, 'y': delayed(lambda: 4)},
})
t2 = TreeValue({
'a': delayed(lambda x: x + 1, t1.a),
'b': delayed(lambda: t1.c.y),
'c': delayed(lambda: 5),
})
t3 = delayed(lambda: 6)
assert f(t1, t2, t3) == TreeValue({
'a': 23, 'b': 35,
'c': {'x': 30, 'y': 32},
})
t1 = TreeValue({
'a': 1,
'b': delayed(lambda x: x ** 2, 3),
'c': {'x': 2, 'y': delayed(lambda: 4)},
})
t2 = TreeValue({
'a': delayed(lambda x: x + 1, t1.a),
'b': delayed(lambda: t1.c.y),
'c': delayed(lambda: 5),
})
t3 = delayed(lambda: 6)
assert f(x=t1, y=t2, z=t3) == TreeValue({
'a': 23, 'b': 35,
'c': {'x': 30, 'y': 32},
})
def test_delayed_treelize(self):
t1 = TreeValue({
'a': 1, 'b': 2, 'x': {'c': 3, 'd': 4},
})
t2 = TreeValue({
'a': 11, 'b': 23, 'x': {'c': 35, 'd': 47},
})
cnt_1 = 0
@func_treelize(delayed=True)
def total(a, b):
nonlocal cnt_1
cnt_1 += 1
return a + b
# positional
t3 = total(t1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# keyword
cnt_1 = 0
t3 = total(a=t1, b=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}
})
assert cnt_1 == 4
# positional, with constant
cnt_1 = 0
t3 = total(1, t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# keyword, with constant
cnt_1 = 0
t3 = total(b=1, a=t2)
assert cnt_1 == 0
assert t3.a == 12
assert cnt_1 == 1
assert t3.x == TreeValue({'c': 36, 'd': 48})
assert cnt_1 == 3
assert t3 == TreeValue({
'a': 12, 'b': 24, 'x': {'c': 36, 'd': 48}
})
assert cnt_1 == 4
# positional, with delay
cnt_1 = 0
t4 = TreeValue({'v': delayed(lambda: t1)})
t5 = TreeValue({'v': delayed(lambda: t2)})
t6 = total(t4, t5)
assert cnt_1 == 0
assert t6.v.a == 12
assert cnt_1 == 1
assert t6.v.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t6 == TreeValue({
'v': {'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}},
})
assert cnt_1 == 4
# keyword, with delay
cnt_1 = 0
t4 = TreeValue({'v': delayed(lambda: t1)})
t5 = TreeValue({'v': delayed(lambda: t2)})
t6 = total(a=t4, b=t5)
assert cnt_1 == 0
assert t6.v.a == 12
assert cnt_1 == 1
assert t6.v.x == TreeValue({'c': 38, 'd': 51})
assert cnt_1 == 3
assert t6 == TreeValue({
'v': {'a': 12, 'b': 25, 'x': {'c': 38, 'd': 51}},
})
assert cnt_1 == 4
|
py
|
1a56fe4c66b7e3812182fd07f2529a29c380e34c
|
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Handle mapping from old network building function names to new names.
Flexible network configuration is achieved by specifying the function name that
builds a network module (e.g., the name of the conv backbone or the mask roi
head). However we may wish to change names over time without breaking previous
config files. This module provides backwards naming compatibility by providing
a mapping from the old name to the new name.
When renaming functions, it's generally a good idea to codemod existing yaml
config files. An easy way to batch edit, by example, is a shell command like
$ find . -name "*.yaml" -exec sed -i -e \
's/head_builder\.add_roi_2mlp_head/fast_rcnn_heads.add_roi_2mlp_head/g' {} \;
to perform the renaming:
head_builder.add_roi_2mlp_head => fast_rcnn_heads.add_roi_2mlp_head
"""
_RENAME = {
    # Removed "ResNet_" from the name because it wasn't relevant
'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v1up4convs':
'mask_rcnn_heads.mask_rcnn_fcn_head_v1up4convs',
    # Removed "ResNet_" from the name because it wasn't relevant
'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v1up':
'mask_rcnn_heads.mask_rcnn_fcn_head_v1up',
    # Removed "ResNet_" from the name because it wasn't relevant
'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v0upshare':
'mask_rcnn_heads.mask_rcnn_fcn_head_v0upshare',
    # Removed "ResNet_" from the name because it wasn't relevant
'mask_rcnn_heads.ResNet_mask_rcnn_fcn_head_v0up':
'mask_rcnn_heads.mask_rcnn_fcn_head_v0up',
# Removed head_builder module in favor of the more specific fast_rcnn name
'head_builder.add_roi_2mlp_head':
'fast_rcnn_heads.add_roi_2mlp_head',
}
def get_new_name(func_name):
if func_name in _RENAME:
func_name = _RENAME[func_name]
return func_name
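# Minimal usage sketch (hypothetical caller): resolve a possibly-renamed
# builder function name from an old config before looking it up elsewhere.
if __name__ == '__main__':
    assert get_new_name('head_builder.add_roi_2mlp_head') == \
        'fast_rcnn_heads.add_roi_2mlp_head'
    # Names that were never renamed pass through unchanged.
    assert get_new_name('fast_rcnn_heads.add_roi_2mlp_head') == \
        'fast_rcnn_heads.add_roi_2mlp_head'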
|
py
|
1a56ff00bac2efc955e29e1f6a6be2672a850cad
|
__author__ = 'Simon'
import json
import re
import urllib.request
import html
fnc_base_url = 'http://cbateam.github.io/CBA_A3/docs/index/'
fnc_page_names = ['Functions', 'Functions2', 'Functions3']
fncPrefix = 'CBA_fnc_'
macro_base_url = 'http://cbateam.github.io/CBA_A3/docs/files/main/script_macros_common-hpp.html'
f = urllib.request.urlopen(macro_base_url)
content = f.read().decode("utf-8")
f.close()
allMacros = re.findall(r'<div class=CTopic><h3 class=CTitle><a name="[^"]*"></a>([^<]*)</h3><div class=CBody>(.*?)</div>',content,re.DOTALL)
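# NOTE: the macro descriptions parsed in the loop below are extracted but not
# written anywhere yet; only the function pages further down feed the
# autocomplete output files.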
for macroContent in allMacros:
c = re.search(r'(.*?)(<h4 class=CHeading>Parameters</h4>.*?)?(<h4 class=CHeading>Example</h4>.*?)?(?:<h4 class=CHeading>Author</h4>.*?)',macroContent[1],re.DOTALL)
if c:
description = c.group(1)
descriptionTable = re.findall(r'<tr><td class=CDLEntry>(.*?)</td><td class=CDLDescription>(.*?)</td></tr>',macroContent[1],re.DOTALL)
for tableEntry in descriptionTable:
re.search(r'(.*?)(<h4 class=CHeading>Parameters</h4>.*?)?(<h4 class=CHeading>Example</h4>.*?)?(?:<h4 class=CHeading>Author</h4>.*?)',macroContent[1],re.DOTALL)
output = []
functionList = []
for fnc_page in fnc_page_names:
f = urllib.request.urlopen(fnc_base_url + fnc_page + '.html')
content = f.read().decode("utf-8")
f.close()
allFunctions = re.findall(r'<a[^>]*href\s*=\s*"([^"]*)"[^>]*class=ISymbol[^>]*>([^<]*)',content)
for function in allFunctions:
outputTemplate = {}
outputTemplate['rightLabel'] = "CBA Function"
outputTemplate['text'] = ''
outputTemplate['description'] = ''
outputTemplate['type'] = 'function'
outputTemplate['descriptionMoreURL'] = fnc_base_url + function[0]
print(function[1])
functionList.append(function[1])
f = urllib.request.urlopen(outputTemplate['descriptionMoreURL'])
content = f.read().decode("utf-8")
f.close()
nameRegex = re.search(r'<a name="([^"]*)">',content)
if nameRegex:
outputTemplate['text'] = nameRegex.group(1)
descriptionRegex = re.search(r'<h4 class=CHeading>Description</h4>(.*)<h4 class=CHeading>Parameters</h4>',content)
if descriptionRegex:
outputTemplate['description'] = str(html.unescape(re.sub(r'(<[^<]+?>)','',descriptionRegex.group(1)).strip()))
output.append(outputTemplate)
autocompleteDict = {
'.source.sqf': {
'autocomplete': {
'symbols':{
'CBAfunctions':{
'suggestions': output
}
}
}
}
}
with open('../settingsAvailable/language-sqf-functions-cba.json', 'w') as f:
json.dump(autocompleteDict,f,indent=2)
with open('grammars-sqf-functions-cba.json', 'w') as f:
f.write('|'.join(functionList))
print("\nCopy contents of 'grammars-sqf-functions-cba.json' into the 'support.function.cba.sqf' section of 'grammars/sqf.json'")
|
py
|
1a56ff37124ad1ac1d2528592b6646dd9b4dac8b
|
#
# TODO: BIG DISCLAIMER -- The trace visualization / window does *not* make
# use of the MVC pattern that the other widgets do.
#
# this is mainly due to the fact that it was prototyped last, and I haven't
# gotten around to moving the 'logic' out of window/widget classes and into
# a dedicated controller class.
#
# this will probably happen sooner than later, to keep everything consistent
#
from tenet.types import BreakpointType
from tenet.util.qt import *
from tenet.util.misc import register_callback, notify_callback
#------------------------------------------------------------------------------
# TraceView
#------------------------------------------------------------------------------
# TODO/XXX: ugly
BORDER_SIZE = 1
LOCKON_DISTANCE = 4
class TraceBar(QtWidgets.QWidget):
"""
A trace visualization.
"""
def __init__(self, core, zoom=False, parent=None):
super(TraceBar, self).__init__(parent)
self.core = core
self._is_zoom = zoom
# misc widget settings
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.setMouseTracking(True)
self.setMinimumSize(32, 32)
# the rendered trace visualization
self._image = QtGui.QImage()
#
# setup trace colors / pens / brushes
#
# r / w / x accesses
self.color_read = self.core.palette.mem_read_bg
self.color_write = self.core.palette.mem_write_bg
self.color_exec = self.core.palette.breakpoint
# current idx
self.color_cursor = self.core.palette.trace_cursor
self.cursor_pen = QtGui.QPen(self.color_cursor, 1, QtCore.Qt.SolidLine)
# zoom / region selection
self.color_selection = self.core.palette.trace_selection
self.color_selection_border = self.core.palette.trace_selection_border
self.pen_selection = QtGui.QPen(self.color_selection, 2, QtCore.Qt.SolidLine)
self.brush_selection = QtGui.QBrush(QtCore.Qt.Dense6Pattern)
self.brush_selection.setColor(self.color_selection_border)
        self._last_hovered = None
        # trace state; populated when a reader is attached via attach_reader()
        self.reader = None
        self.length = 0
        self.start_idx = 0
        self.end_idx = 0
        self.density = 0
self._width = 0
self._height = 0
self._selection_origin = -1
self._selection_start = -1
self._selection_end = -1
self._executions = []
self._reads = []
self._writes = []
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
self._selection_changed_callbacks = []
def _focused_breakpoint_changed(self, breakpoint):
"""
The focused breakpoint has changed.
"""
self._refresh_breakpoint_hits(breakpoint)
self.refresh()
def _refresh_breakpoint_hits(self, breakpoint):
self._executions = []
self._reads = []
self._writes = []
if not self.isVisible():
return
if not (self.core.reader and breakpoint):
return
if breakpoint.type == BreakpointType.EXEC:
self._executions = self.core.reader.get_executions_between(breakpoint.address, self.start_idx, self.end_idx, self.density)
elif breakpoint.type == BreakpointType.ACCESS:
if breakpoint.length == 1:
self._reads, self._writes = self.core.reader.get_memory_accesses_between(breakpoint.address, self.start_idx, self.end_idx, self.density)
else:
self._reads, self._writes = self.core.reader.get_memory_region_accesses_between(breakpoint.address, breakpoint.length, self.start_idx, self.end_idx, self.density)
else:
raise NotImplementedError
def attach_reader(self, reader):
# clear out any existing state
self.reset()
# save the reader
self.reader = reader
# initialize state based on the reader
self.set_zoom(0, reader.trace.length)
# attach signals to the new reader
reader.idx_changed(self.refresh)
def reset(self):
"""
        Reset the trace visualization to an empty, detached state.
"""
self.reader = None
self.start_idx = 0
self.end_idx = 0
self.density = 0
self._selection_origin = -1
self._selection_start = -1
self._selection_end = -1
self._executions = []
self._reads = []
self._writes = []
self.refresh()
def refresh(self, *args):
"""
        Redraw the trace visualization and repaint the widget.
"""
self._draw_trace()
self.update()
def _idx2pos(self, idx):
"""
        Translate a given IDX (timestamp) to an approximate Y coordinate.
"""
relative_idx = idx - self.start_idx
y = int(relative_idx / self.density) + BORDER_SIZE
if y < BORDER_SIZE:
y = BORDER_SIZE
elif y > (self._height - BORDER_SIZE):
y = self._height
return y
def _pos2idx(self, y):
"""
Translate a given Y coordinate to an approximate IDX.
"""
y -= BORDER_SIZE
relative_idx = round(y * self.density)
idx = self.start_idx + relative_idx
# clamp IDX to the start / end of the trace
if idx < self.start_idx:
idx = self.start_idx
elif idx > self.end_idx:
idx = self.end_idx
return idx
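    # Worked example for the idx <-> pixel mapping above (illustrative numbers,
    # not from the source): with a zoom window of 1,000,000 timestamps and a
    # widget 502 px tall, density = 1_000_000 / (502 - 2*BORDER_SIZE) = 2_000
    # idx per pixel, so a click at y = 11 maps to
    # idx = start_idx + round((11 - BORDER_SIZE) * 2_000) = start_idx + 20_000.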
def _compute_pixel_distance(self, y, idx):
"""
Compute the pixel distance from a given y to an IDX.
"""
y_idx = ((idx - self.start_idx) / self.density) - BORDER_SIZE
distance_pixels = abs(y-y_idx)
return distance_pixels
def _update_hover(self, current_y):
"""
        Highlight the trace event (if any) closest to the hovered Y coordinate.
"""
# fast path / nothing to do if hovered position hasn't changed
if self._last_hovered and self._last_hovered[1] == current_y:
return
# see if there's an interesting trace event close to the hover
hovered_idx = self._pos2idx(current_y)
closest_idx = self._get_closest_visible_idx(hovered_idx)
px_distance = self._compute_pixel_distance(current_y, closest_idx)
#print(f" HOVERED IDX {hovered_idx:,}, CLOSEST IDX {closest_idx:,}, DIST {px_distance}")
painter = QtGui.QPainter(self._image)
LINE_WIDTH = self._width - (BORDER_SIZE * 2)
# unpaint the last hovered line with the position/color we stored for it
if self._last_hovered:
old_data, prev_y = self._last_hovered
length = min(len(old_data), self._image.width() * 4)
current_data = self._image.scanLine(prev_y).asarray(length)
for i in range(length):
current_data[i] = old_data[i]
# nothing close, so don't bother painting a highlight
if px_distance >= LOCKON_DISTANCE:
self._last_hovered = None
#print("NOTHING CLOSE!")
self._draw_cursor(painter)
return
locked_y = self._idx2pos(closest_idx)
# overwrite last_hovered with the latest hover position / color we will stomp
current_line_data = self._image.scanLine(locked_y)
old_data = [x for x in current_line_data.asarray(4 * self._image.width())]
self._last_hovered = (old_data, locked_y)
#self._last_hovered = (self._image.pixelColor(LINE_WIDTH//2, locked_y), locked_y)
# paint the currently hovered line
painter.setPen(self.cursor_pen)
painter.drawLine(BORDER_SIZE, locked_y, LINE_WIDTH, locked_y)
#print("PAINTED NEW!")
self._draw_cursor(painter)
def set_zoom(self, start_idx, end_idx):
"""
        Set the visible (zoomed) portion of the trace to [start_idx, end_idx].
"""
#print("Setting Zoom!", start_idx, end_idx)
# save the first and last timestamps to be shown
self.start_idx = start_idx
self.end_idx = end_idx
# compute the number of instructions visible
self.length = (end_idx - start_idx)
# compute the number of instructions per y pixel
self.density = self.length / (self._height - BORDER_SIZE * 2)
self._refresh_breakpoint_hits(self.core.breakpoints.model.focused_breakpoint)
self.refresh()
def set_selection(self, start_idx, end_idx):
"""
        Set the highlighted selection region to the given idx range.
"""
self._selection_end = end_idx
self._selection_start = start_idx
self.refresh()
def _global_selection_changed(self, start_idx, end_idx):
if start_idx == end_idx:
return
self.set_selection(start_idx, end_idx)
def _zoom_selection_changed(self, start_idx, end_idx):
if start_idx == end_idx:
self.hide()
else:
self.show()
self.set_zoom(start_idx, end_idx)
def highlight_executions(self, idxs):
self._executions = idxs
self.refresh()
def _draw_trace(self):
w, h = self._width, self._height
self._last_hovered = None
self._image = QtGui.QImage(w, h, QtGui.QImage.Format_RGB32)
if not self.reader:
self._image.fill(self.core.palette.trace_bedrock)
else:
self._image.fill(self.core.palette.trace_instruction)
painter = QtGui.QPainter(self._image)
#
# draw accesses along the trace timeline
#
self._draw_accesses(painter)
#
# draw region selection
#
self._draw_selection(painter)
#
# draw border around trace timeline
#
border_pen = QtGui.QPen(self.core.palette.trace_border, 1, QtCore.Qt.SolidLine)
painter.setPen(border_pen)
# top & bottom
painter.drawLine(0, 0, w, 0)
painter.drawLine(0, h-1, w, h-1)
# left & right
painter.drawLine(0, 0, 0, h)
painter.drawLine(w-1, 0, w-1, h)
#
# draw current trace position cursor
#
self._draw_cursor(painter)
def _draw_accesses(self, painter):
"""
Draw read / write / execs accesses on the trace timeline.
"""
access_sets = \
[
(self._reads, self.color_read),
(self._writes, self.color_write),
(self._executions, self.color_exec),
]
for entries, color in access_sets:
painter.setPen(color)
for idx in entries:
# skip entries that fall outside the visible zoom
if not(self.start_idx <= idx < self.end_idx):
continue
relative_idx = idx - self.start_idx
y = int(relative_idx / self.density) + BORDER_SIZE
painter.drawLine(0, y, self._width, y)
def _draw_cursor(self, painter):
"""
Draw the user cursor / current position in the trace.
"""
if not self.reader:
return
path = QtGui.QPainterPath()
size = 13
assert size % 2, "Cursor triangle size must be odd"
# rebase the absolute trace cursor idx to the current 'zoomed' view
relative_idx = self.reader.idx - self.start_idx
if relative_idx < 0:
return False
# compute the y coordinate / line to center the user cursor around
cursor_y = int(relative_idx / self.density) + BORDER_SIZE
# the top point of the triangle
top_x = 0
top_y = cursor_y - (size // 2) # vertically align the triangle so the tip matches the cross section
#print("TOP", top_x, top_y)
# bottom point of the triangle
bottom_x = top_x
bottom_y = top_y + size - 1
#print("BOT", bottom_x, bottom_y)
        # the 'tip' of the triangle, pointing in towards the center of the trace
tip_x = top_x + (size // 2)
tip_y = top_y + (size // 2)
#print("CURSOR", tip_x, tip_y)
# start drawing from the 'top' of the triangle
path.moveTo(top_x, top_y)
# generate the triangle path / shape
path.lineTo(bottom_x, bottom_y)
path.lineTo(tip_x, tip_y)
path.lineTo(top_x, top_y)
painter.setPen(self.cursor_pen)
painter.drawLine(0, cursor_y, self._width, cursor_y)
# paint the defined triangle
# TODO: don't hardcode colors
painter.setPen(QtCore.Qt.black)
painter.setBrush(QtGui.QBrush(QtGui.QColor("red")))
painter.drawPath(path)
def _draw_selection(self, painter):
"""
Draw a region selection rect.
"""
#print("DRAWING SELECTION?", self._selection_start, self._selection_end)
if self._selection_start == self._selection_end:
return
start_y = int((self._selection_start - self.start_idx) / self.density)
end_y = int((self._selection_end - self.start_idx) / self.density)
painter.setBrush(self.brush_selection)
painter.setPen(self.pen_selection)
painter.drawRect(
BORDER_SIZE, # x
start_y+BORDER_SIZE, # y
self._width - (BORDER_SIZE * 2), # width
end_y - start_y - (BORDER_SIZE * 2) # height
)
def wheelEvent(self, event):
if not self.reader:
return
mod = QtGui.QGuiApplication.keyboardModifiers()
step_over = bool(mod & QtCore.Qt.ShiftModifier)
if event.angleDelta().y() > 0:
self.reader.step_backward(1, step_over)
elif event.angleDelta().y() < 0:
self.reader.step_forward(1, step_over)
self.refresh()
event.accept()
def _update_selection(self, y):
idx_event = self._pos2idx(y)
if idx_event > self._selection_origin:
self._selection_start = self._selection_origin
self._selection_end = idx_event
else:
self._selection_end = self._selection_origin
self._selection_start = idx_event
def mouseMoveEvent(self, event):
#mod = QtGui.QGuiApplication.keyboardModifiers()
#if mod & QtCore.Qt.ShiftModifier:
# print("SHIFT IS HELD!!")
#import ida_kernwin
#ida_kernwin.refresh_idaview_anyway()
if event.buttons() == QtCore.Qt.MouseButton.LeftButton:
self._update_selection(event.y())
self.refresh()
else:
self._update_hover(event.y())
self.update()
def mousePressEvent(self, event):
"""
Qt override to capture mouse button presses
"""
if event.button() == QtCore.Qt.MouseButton.LeftButton:
idx_origin = self._pos2idx(event.y())
self._selection_origin = idx_origin
self._selection_start = idx_origin
self._selection_end = idx_origin
return
def _get_closest_visible_idx(self, idx):
"""
Return the closest IDX (timestamp) to the given IDX.
"""
closest_idx = -1
        smallest_distance = 999999999999999999999999
        for entries in [self._reads, self._writes, self._executions]:
            for current_idx in entries:
                distance = abs(idx - current_idx)
                if distance < smallest_distance:
                    closest_idx = current_idx
                    smallest_distance = distance
return closest_idx
    def mouseReleaseEvent(self, event):
        """
        Qt override to capture mouse button releases.
        """
if not self.reader:
return
# if the left mouse button was released...
if event.button() == QtCore.Qt.MouseButton.LeftButton:
#
# the initial 'click' origin is not set, so that means the 'click'
# event did not start over this widget... or is something we
# should just ignore.
#
if self._selection_origin == -1:
return
#
# clear the selection origin as we will be consuming the
            # selection event in the following codepath
#
self._selection_origin = -1
#
# if the user selection appears to be a 'click' vs a zoom / range
# selection, then seek to the clicked address
#
if self._selection_start == self._selection_end:
selected_idx = self._selection_start
#clear_focus = True
#
# if there is a highlighted bp near the click, we should lock
# onto that instead...
#
closest_idx = self._get_closest_visible_idx(selected_idx)
current_y = self._idx2pos(self._selection_start)
px_distance = self._compute_pixel_distance(current_y, closest_idx)
if px_distance < LOCKON_DISTANCE:
selected_idx = closest_idx
# clear_focus = False
#elif self._is_zoom:
# clear_focus = False
#
# jump to the selected area
#
#print(f"Jumping to {selected_idx:,}")
self.reader.seek(selected_idx)
#if clear_focus:
# self.core.breakpoints.model.focused_breakpoint = None
self._notify_selection_changed(selected_idx, selected_idx)
self.refresh()
return
if self._is_zoom:
new_start = self._selection_start
new_end = self._selection_end
self._selection_start = self._selection_end = -1
self.set_zoom(new_start, new_end)
self._notify_selection_changed(new_start, new_end)
else:
self._notify_selection_changed(self._selection_start, self._selection_end)
def leaveEvent(self, event):
self.refresh()
def keyPressEvent(self, e):
#print("PRESSING", e.key(), e.modifiers())
pass
def keyReleaseEvent(self, e):
#print("RELEASING", e.key(), e.modifiers())
pass
def resizeEvent(self, event):
size = event.size()
self._width, self._height = self.width(), self.height()
self.density = self.length / (self._height - BORDER_SIZE * 2)
#self._refresh_breakpoint_hits(breakpoint)
self.refresh()
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawImage(0, 0, self._image)
#----------------------------------------------------------------------
# Callbacks
#----------------------------------------------------------------------
def selection_changed(self, callback):
"""
Subscribe a callback for a trace slice selection change event.
"""
register_callback(self._selection_changed_callbacks, callback)
def _notify_selection_changed(self, start_idx, end_idx):
"""
Notify listeners of a trace slice selection change event.
"""
notify_callback(self._selection_changed_callbacks, start_idx, end_idx)
class TraceView(QtWidgets.QWidget):
def __init__(self, core, parent=None):
super(TraceView, self).__init__(parent)
self.core = core
self._init_ui()
def _init_ui(self):
"""
        Initialize the child trace bars and the context menu for this view.
"""
self._init_bars()
self._init_ctx_menu()
def attach_reader(self, reader):
self.trace_global.attach_reader(reader)
self.trace_local.attach_reader(reader)
self.trace_local.hide()
def detach_reader(self):
self.trace_global.reset()
self.trace_local.reset()
self.trace_local.hide()
def _init_bars(self):
"""
        Create the global and zoomed trace bars and connect their signals.
"""
self.trace_local = TraceBar(self.core, zoom=True)
self.trace_global = TraceBar(self.core)
# connect the local view to follow the global selection
self.trace_global.selection_changed(self.trace_local._zoom_selection_changed)
self.trace_local.selection_changed(self.trace_global._global_selection_changed)
# connect other signals
self.core.breakpoints.model.focused_breakpoint_changed(self.trace_global._focused_breakpoint_changed)
self.core.breakpoints.model.focused_breakpoint_changed(self.trace_local._focused_breakpoint_changed)
# hide the zoom bar by default
self.trace_local.hide()
# setup the layout and spacing for the tracebar
hbox = QtWidgets.QHBoxLayout(self)
hbox.setContentsMargins(3, 3, 3, 3)
hbox.setSpacing(3)
# add the layout container / mechanism to the toolbar
hbox.addWidget(self.trace_local)
hbox.addWidget(self.trace_global)
self.setLayout(hbox)
def _init_ctx_menu(self):
"""
        Create the right click context menu for this view.
"""
self._menu = QtWidgets.QMenu()
# create actions to show in the context menu
self._action_load = self._menu.addAction("Load new trace")
self._action_close = self._menu.addAction("Close trace")
# install the right click context menu
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._ctx_menu_handler)
#--------------------------------------------------------------------------
# Signals
#--------------------------------------------------------------------------
def _ctx_menu_handler(self, position):
action = self._menu.exec_(self.mapToGlobal(position))
if action == self._action_load:
self.core.interactive_load_trace(True)
elif action == self._action_close:
self.core.close_trace()
# if a tracebar got added, we need to update the layout
def update_from_model(self):
for bar in self.model.tracebars.values()[::-1]:
self.hbox.addWidget(bar)
# this will insert the children (tracebars) and apply spacing as appropriate
self.bar_container.setLayout(self.hbox)
#-----------------------------------------------------------------------------
# Dockable Trace Visualization
#-----------------------------------------------------------------------------
# TODO: refactor out to trace controller / dock model
class TraceDock(QtWidgets.QToolBar):
"""
A Qt 'Toolbar' to house the TraceBar visualizations.
We use a Toolbar explicitly because they are given unique docking regions
around the QMainWindow in Qt-based applications. This allows us to pin
    the visualizations to areas where they will not be disturbed by other
    dockable widgets.
"""
def __init__(self, core, parent=None):
super(TraceDock, self).__init__(parent)
self.core = core
self.view = TraceView(core, self)
self.setMovable(False)
self.setContentsMargins(0, 0, 0, 0)
self.addWidget(self.view)
def attach_reader(self, reader):
self.view.attach_reader(reader)
def detach_reader(self):
self.view.detach_reader()
|
py
|
1a5702e2d592b1a3b10851b3b237d2e43ab10947
|
"""
Post-processing function that takes a case_data_set and outputs a csv file
"""
import csv
# pylint: disable=E0611,F0401
from openmdao.main.case import flatten_obj
def caseset_query_to_csv(data, filename='cases.csv', delimiter=',', quotechar='"'):
"""
Post-processing function that takes a case_data_set and outputs a csv
file. Should be able to pass tests of current csv case recorder (column
ordering, meta column, etc...) Assume query by case (not variable).
Inputs:
data - results of fetch on Query object
"""
cds = data.cds
drivers = {}
for driver in cds.drivers:
drivers[driver['_id']] = driver['name']
# Determine inputs & outputs, map pseudos to expression names.
expressions = cds.simulation_info['expressions']
metadata = cds.simulation_info['variable_metadata']
inputs = []
outputs = []
pseudos = {}
for name in sorted(data[0].keys()):
# All inputs and outputs that change.
if name in metadata:
if metadata[name]['iotype'] == 'in':
inputs.append(name)
else:
outputs.append(name)
# Include objectives and constraints from all simulation levels.
elif '_pseudo_' in name and not name.endswith('.out0'):
for exp_name, exp_dict in expressions.items():
if exp_dict['pcomp_name'] == name:
pseudos[name] = '%s(%s)' % (exp_dict['data_type'], exp_name)
break
else:
raise RuntimeError('Cannot find %r in expressions' % name)
outputs.append(name)
# Allow private vars from components.
elif '.' in name:
outputs.append(name)
# Open CSV file
outfile = open(filename, 'wb')
csv_writer = csv.writer(outfile, delimiter=delimiter,
quotechar=quotechar,
quoting=csv.QUOTE_NONNUMERIC)
# No automatic data type conversion is performed unless the
# QUOTE_NONNUMERIC format option is specified (in which case unquoted
# fields are transformed into floats).
# Write the data
# data is a list of lists where the inner list is the values and metadata
# for a case
sorted_input_keys = []
sorted_input_values = []
sorted_output_keys = []
sorted_output_values = []
for i, row in enumerate( data ):
input_keys = []
input_values = []
for name in inputs:
obj = row[ row.name_map[ name ] ]
for key, value in flatten_obj(name, obj):
input_keys.append(key)
input_values.append(value)
output_keys = []
output_values = []
for name in outputs:
obj = row[ row.name_map[ name ] ]
if name in pseudos:
name = pseudos[name]
for key, value in flatten_obj(name, obj):
output_keys.append(key)
output_values.append(value)
# This should not be necessary, however python's csv writer
# is not writing boolean variables correctly as strings.
for index, item in enumerate(input_values):
if isinstance(item, bool):
input_values[index] = str(item)
for index, item in enumerate(output_values):
if isinstance(item, bool):
output_values[index] = str(item)
# Sort the columns alphabetically.
if len(input_keys) > 0:
sorted_input_keys, sorted_input_values = \
(list(item) for item in zip(*sorted(zip(input_keys,
input_values))))
if len(output_keys) > 0:
sorted_output_keys, sorted_output_values = \
(list(item) for item in zip(*sorted(zip(output_keys,
output_values))))
if outfile is None:
raise RuntimeError('Attempt to record on closed recorder')
if i == 0:
headers = ['timestamp', '/INPUTS']
headers.extend(sorted_input_keys)
headers.append('/OUTPUTS')
headers.extend(sorted_output_keys)
headers.extend(['/METADATA', 'uuid', 'parent_uuid', 'msg'])
csv_writer.writerow(headers)
header_size = len(headers)
timestamp = row[ row.name_map[ 'timestamp' ] ]
csv_data = [timestamp]
csv_data.append('')
csv_data.extend(sorted_input_values)
csv_data.append('')
csv_data.extend(sorted_output_values)
exc = row[ row.name_map[ 'error_message' ] ]
msg = '' if exc is None else str(exc)
case_uuid = row[ row.name_map[ '_id' ] ]
parent_uuid = row[ row.name_map[ '_parent_id' ] ]
csv_data.extend(['', case_uuid, parent_uuid, msg])
if header_size != len(csv_data):
raise RuntimeError("number of data points (%d) doesn't match header"
" size (%d) in CSV recorder"
% (len(csv_data), header_size))
csv_writer.writerow(csv_data)
outfile.close()
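# Usage sketch (hypothetical file name; the query calls shown follow the old
# openmdao CaseDataset interface and are an assumption, not part of this file):
#
#     cds = CaseDataset('cases.json', 'json')
#     data = cds.data.fetch()              # query by case, not by variable
#     caseset_query_to_csv(data, filename='cases.csv')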
|
py
|
1a57041a5c37b40576a1d671f6bb6e541b0c5b5c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=40
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.Z.on(input_qubit[2])) # number=36
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=30
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=31
c.append(cirq.H.on(input_qubit[3])) # number=32
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=33
c.append(cirq.X.on(input_qubit[3])) # number=34
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=35
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=29
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.Y.on(input_qubit[2])) # number=11
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[1])) # number=20
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=24
c.append(cirq.Z.on(input_qubit[3])) # number=25
c.append(cirq.H.on(input_qubit[0])) # number=37
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=38
c.append(cirq.H.on(input_qubit[0])) # number=39
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=21
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=22
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3094.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
py
|
1a5705afc43a1412e026818bef13e3ab4551bd67
|
from tkinter import Frame, Canvas, Tk, Text, LEFT, INSERT, END, messagebox, Button, X
from alarming_service import time_diff
def GUI_present():
root = Tk()
canvas = Canvas(root)
canvas.pack()
frame = Frame(canvas)
frame.pack()
top_text = Text(frame)
top_text.insert(
INSERT,
"Welcome to the Simple Alarming Service"
)
top_text.pack()
alarm_set = Button(frame, text="Set and Deploy Alarm", command=time_diff)
alarm_set.pack(fill=X)
root.mainloop()
if __name__ == "__main__":
GUI_present()
|
py
|
1a5705d766d70213905af07cd2676a7ee725b194
|
"""
This file includes the main function for gym atari reinforcement learning.
"""
import os
import gym
import numpy as np
from argparse_decorate import init_parser, add_arg
from deep_q_learning import DQLAtari
from sac_discrete import SoftActorCriticsDiscrete
@init_parser()
@add_arg('--start_episode', type=int, default=0, help='A number for start episode index.')
@add_arg('--eval', type=bool, default=False, help='True means evaluate model only.')
@add_arg('--game_index', type=int, default=1, choices=[0, 1, 2],
help='Represent Breakout, MsPacman and Pong respectively.')
@add_arg('--env_name', type=str, default=None, help='The name of the gym atari environment.')
@add_arg('--memory_size', type=int, default=100000, help='The size of the memory space.')
@add_arg('--start_epsilon', type=float, default=1.0, help='The initial probability for random actions.')
@add_arg('--min_epsilon', type=float, default=0.05, help='The minimum probability for random actions.')
@add_arg('--reward_clip', type=bool, default=False, help='Clip reward in [-1, 1] range if True.')
@add_arg('--live_penalty', type=bool, default=True, help='Penalties when agent lose a life in the game.')
@add_arg('--agent', type=str, default='dsac', choices=['dql', 'dsac'],
help='Deep Q-learning and discrete soft Actor-Critics algorithms.')
def main(**kwargs):
"""
The main function for gym atari reinforcement learning.
"""
atari_game = ['BreakoutNoFrameskip-v4', 'MsPacmanNoFrameskip-v4', 'PongNoFrameskip-v4']
env_name = kwargs['env_name'] if kwargs['env_name'] is not None else atari_game[kwargs['game_index']]
dirs = './' + env_name
if not os.path.exists(dirs):
os.makedirs(dirs)
img_size = (4, 84, 84)
env = gym.make(env_name)
memory_par = (kwargs['memory_size'], img_size)
action_space = np.array([i for i in range(env.action_space.n)], dtype=np.uint8)
game = (env_name, env, kwargs['live_penalty'])
if kwargs['agent'] == 'dql':
agent = DQLAtari(memory_par=memory_par,
action_space=action_space,
game=game,
reward_clip=kwargs['reward_clip'],
epsilon=(kwargs['start_epsilon'], kwargs['min_epsilon']))
elif kwargs['agent'] == 'dsac':
agent = SoftActorCriticsDiscrete(memory_par=memory_par,
action_space=action_space,
game=game,
reward_clip=kwargs['reward_clip'])
else:
raise Exception('The agent does not exist.')
agent.simulate(net_path=dirs, start_episodes=kwargs['start_episode'], eval=kwargs['eval'], start_frames=0)
if __name__ == '__main__':
main()
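# Example invocations (the file name `main.py` is an assumption; it is not
# given in the source):
#
#     python main.py --agent dsac --game_index 1
#     python main.py --agent dql --game_index 0 --start_epsilon 1.0 --min_epsilon 0.05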
|
py
|
1a5708762d01c0f99729b99b5009c0e704c429df
|
class GPX:
def __init__(self,filename):
self.fd = open(filename,"w")
self.fd.write('<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n')
self.fd.write('<gpx version="1.1"\n')
self.fd.write(' creator="Osmawalk - https://github.com/xtompok/osmawalk"\n')
self.fd.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n')
self.fd.write(' xmlns="http://www.topografix.com/GPX/1/1"\n')
self.fd.write(' xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd">\n')
self.wpts = []
def close(self):
for wpt in self.wpts:
self.fd.write(str(wpt))
self.wpts = []
self.fd.write('</gpx>\n');
self.fd.close()
def writeTrack(self,track):
self.startTrack()
for point in track:
self.writeTrkpt(point[0],point[1],point[2])
self.endTrack()
def addWpt(self,name,lat,lon,ele):
self.wpts.append(Wpt(name,lat,lon,ele))
def startTrack(self):
self.fd.write('<trk>\n<trkseg>\n')
def endTrack(self):
self.fd.write('</trkseg>\n</trk>\n')
for wpt in self.wpts:
self.fd.write(str(wpt))
self.wpts = []
def writeTrkpt(self,lat,lon,ele):
self.fd.write(' <trkpt lat="'+str(lat)+'" lon="'+str(lon)+'">\n')
self.fd.write(' <ele>'+str(ele)+'</ele>\n')
self.fd.write(' </trkpt>\n')
class Wpt:
def __init__(self,aname,alat,alon,anele):
self.name = aname
self.lat = alat
self.lon = alon
self.ele = anele
def __str__(self):
start='<wpt lat="'+str(self.lat)+'" lon="'+str(self.lon)+'">\n'
inner=' <name>'+str(self.name)+'</name>\n <ele>'+str(self.ele)+'</ele>\n'
end='</wpt>\n'
return start+inner+end
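# Minimal usage sketch (hypothetical file name and coordinates): write a GPX
# file with one waypoint and a two-point track.
if __name__ == "__main__":
    gpx = GPX("example.gpx")
    gpx.addWpt("Start", 50.0755, 14.4378, 235)
    gpx.writeTrack([(50.0755, 14.4378, 235), (50.0760, 14.4390, 240)])
    gpx.close()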
|
py
|
1a5708893b9418def560d0d21f81666e3fd866ba
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# OnVBA documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 14 15:46:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['ntemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OnVBA'
copyright = '2015, Rolenun'
author = 'Rolenun'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['nstatic']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'OnVBAdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OnVBA.tex', 'OnVBA Documentation',
'Rolenun', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'onvba', 'OnVBA Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OnVBA', 'OnVBA Documentation',
author, 'OnVBA', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py
|
1a570898a3e24436162b59cd902370c05929daec
|
import logging
from .dispatcher import Dispatcher
from .function import promised_type_of as promised_type_of1
from .type import (
ComparableType,
as_type,
TypeType,
promised_type_of as promised_type_of2,
is_type,
)
from .util import multihash
__all__ = ["parametric", "type_parameter", "kind", "Kind", "List", "Tuple", "type_of"]
log = logging.getLogger(__name__)
_dispatch = Dispatcher()
@_dispatch
def _get_id(x):
return id(x)
@_dispatch
def _get_id(x: {int, float, str, TypeType}):
return x
class CovariantType(type):
"""A metaclass that implements *covariance* of parametric types."""
def __subclasscheck__(self, subclass):
if hasattr(subclass, "_is_parametric"):
# Check that they are instances of the same parametric type.
if subclass.__bases__ == self.__bases__:
par_subclass = type_parameter(subclass)
par_self = type_parameter(self)
# Check that the type parameters are types.
if is_type(par_subclass) and is_type(par_self):
return as_type(par_subclass) <= as_type(par_self)
# Default behaviour to `type`s subclass check.
return type.__subclasscheck__(self, subclass)
def parametric(Class):
"""A decorator for parametric classes."""
subclasses = {}
if not issubclass(Class, object): # pragma: no cover
raise RuntimeError(
f"To let {Class} be a parametric class, it must be a new-style class."
)
def __new__(cls, *ps):
# Convert type parameters.
ps = tuple(_get_id(p) for p in ps)
# Only create new subclass if it doesn't exist already.
if ps not in subclasses:
def __new__(cls, *args, **kw_args):
return Class.__new__(cls)
# Create subclass.
name = Class.__name__ + "{" + ",".join(str(p) for p in ps) + "}"
SubClass = type.__new__(
CovariantType,
name,
(ParametricClass,),
{"__new__": __new__, "_is_parametric": True},
)
SubClass._type_parameter = ps[0] if len(ps) == 1 else ps
SubClass.__module__ = Class.__module__
# Attempt to correct docstring.
try:
SubClass.__doc__ = Class.__doc__
except AttributeError: # pragma: no cover
pass
subclasses[ps] = SubClass
return subclasses[ps]
# Create parametric class.
ParametricClass = type(Class.__name__, (Class,), {"__new__": __new__})
ParametricClass.__module__ = Class.__module__
# Attempt to correct docstring.
try:
ParametricClass.__doc__ = Class.__doc__
except AttributeError: # pragma: no cover
pass
return ParametricClass
@_dispatch
def type_parameter(x):
"""Get the type parameter of an instance of a parametric type.
Args:
x (object): Instance of a parametric type.
Returns:
object: Type parameter.
"""
return x._type_parameter
def kind(SuperClass=object):
"""Create a parametric wrapper type for dispatch purposes.
Args:
SuperClass (type): Super class.
Returns:
object: New parametric type wrapper.
"""
@parametric
class Kind(SuperClass):
def __init__(self, *xs):
self.xs = xs
def get(self):
return self.xs[0] if len(self.xs) == 1 else self.xs
return Kind
Kind = kind() #: A default kind provided for convenience.
@parametric
class _ParametricList(list):
"""Parametric list type."""
class List(ComparableType):
"""Parametric list Plum type.
Args:
el_type (type or ptype): Element type.
"""
def __init__(self, el_type):
self._el_type = as_type(el_type)
def __hash__(self):
return multihash(List, self._el_type)
def __repr__(self):
return f"ListType({self._el_type})"
def get_types(self):
return (_ParametricList(self._el_type),)
@property
def parametric(self):
return True
@parametric
class _ParametricTuple(tuple):
"""Parametric tuple type."""
class Tuple(ComparableType):
"""Parametric tuple Plum type.
Args:
el_type (type or ptype): Element type.
"""
def __init__(self, el_type):
self._el_type = as_type(el_type)
def __hash__(self):
return multihash(Tuple, self._el_type)
def __repr__(self):
return f"TupleType({self._el_type})"
def get_types(self):
return (_ParametricTuple(self._el_type),)
@property
def parametric(self):
return True
def _types_of_iterable(xs):
types = {type_of(x) for x in xs}
if len(types) == 1:
return list(types)[0]
else:
return as_type(types)
def type_of(obj):
"""Get the Plum type of an object.
Args:
obj (object): Object to get type of.
Returns:
ptype: Plum type of `obj`.
"""
if isinstance(obj, list):
return List(_types_of_iterable(obj))
if isinstance(obj, tuple):
return Tuple(_types_of_iterable(obj))
return as_type(type(obj))
# Deliver `type_of`.
promised_type_of1.deliver(type_of)
promised_type_of2.deliver(type_of)
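# Illustrative usage (sketch; `Box` is a made-up class, everything else is defined above):
#   @parametric
#   class Box:
#       pass
#   BoxInt = Box(int)                  # creates and caches the subclass Box{int}
#   type_parameter(BoxInt)             # -> int
#   issubclass(Box(int), Box(object))  # True: type parameters are covariant
#   type_of([1, 2, 3])                 # -> List(int)
#   type_of((1, 2))                    # -> Tuple(int)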
|
py
|
1a5708c99e855c2cec806f6e9801c3419a62ebc3
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Ensure that projects with multiple translation catalogs maintain translation
files correctly.
"""
# There was a bug that affected `Translate()` when no targets were provided
# explicitly via the argument list. If, for example, `pkg1/SConscript` and
# `pkg2/SConscript` scripts in some project `p1` had:
#
# Translate(LINGUAS_FILE = 1)
#
# then target languages defined in `pkg1/LINGUAS` would affect the list of
# target languages emitted by `pkg2/SConscript` and vice versa.
#
# The pull request #64 on bitbucket fixes this. Here is the test case to
# replicate the bug.
import TestSCons
from os import path
test = TestSCons.TestSCons()
if not test.where_is('xgettext'):
test.skip_test("could not find 'xgettext'; skipping test(s)\n")
if not test.where_is('msgmerge'):
    test.skip_test("could not find 'msgmerge'; skipping test(s)\n")
if not test.where_is('msginit'):
test.skip_test("could not find 'msginit'; skipping test(s)\n")
if not test.where_is('msgfmt'):
test.skip_test("could not find 'msgfmt'; skipping test(s)\n")
#############################################################################
# Test case 1
#############################################################################
test.subdir(['tc1'])
test.write( ['tc1', 'SConstruct'],
"""
env = Environment( tools = ["default", "gettext"] )
env.Replace(POAUTOINIT = 1)
env.Replace(LINGUAS_FILE = 1)
SConscript(["pkg1/SConscript", "pkg2/SConscript"], exports = ["env"])
""")
# package `pkg1`
test.subdir(['tc1', 'pkg1'])
test.write( ['tc1', 'pkg1', 'LINGUAS'],
"""
en
pl
""")
test.write( ['tc1', 'pkg1', 'SConscript'],
"""
Import("env")
env.Translate(source = ['a.cpp'])
""")
test.write(['tc1', 'pkg1', 'a.cpp'], """ gettext("Hello from pkg1/a.cpp") """ )
# package `pkg2`
test.subdir(['tc1', 'pkg2'])
test.write( ['tc1', 'pkg2', 'LINGUAS'],
"""
de
fr
""")
test.write( ['tc1', 'pkg2', 'SConscript'],
"""
Import("env")
env.Translate(source = ['b.cpp'])
""")
test.write(['tc1', 'pkg2', 'b.cpp'], """ gettext("Hello from pkg2/b.cpp") """ )
# NOTE: msgmerge(1) prints its messages to stderr, so we must ignore them;
# that is why stderr=None is crucial here. There is no point in matching stderr
# against specific values: the messages are internationalized :).
test.run(arguments = 'po-update', chdir = 'tc1', stderr = None)
test.must_exist( ['tc1', 'pkg1', 'en.po'] )
test.must_exist( ['tc1', 'pkg1', 'pl.po'] )
test.must_not_exist(['tc1', 'pkg1', 'de.po'] )
test.must_not_exist(['tc1', 'pkg1', 'fr.po'] )
test.must_exist( ['tc1', 'pkg2', 'de.po'] )
test.must_exist( ['tc1', 'pkg2', 'fr.po'] )
test.must_not_exist(['tc1', 'pkg2', 'en.po'] )
test.must_not_exist(['tc1', 'pkg2', 'pl.po'] )
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py
|
1a57092aae12bd2905d0f0af2e41efe8d6d2fe43
|
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations. # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LaunchLinkSchema(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'launch_link': 'str'
}
attribute_map = {
'launch_link': 'launchLink'
}
def __init__(self, launch_link=None): # noqa: E501
"""LaunchLinkSchema - a model defined in Swagger""" # noqa: E501
self._launch_link = None
self.discriminator = None
self.launch_link = launch_link
@property
def launch_link(self):
"""Gets the launch_link of this LaunchLinkSchema. # noqa: E501
:return: The launch_link of this LaunchLinkSchema. # noqa: E501
:rtype: str
"""
return self._launch_link
@launch_link.setter
def launch_link(self, launch_link):
"""Sets the launch_link of this LaunchLinkSchema.
:param launch_link: The launch_link of this LaunchLinkSchema. # noqa: E501
:type: str
"""
if launch_link is None:
raise ValueError("Invalid value for `launch_link`, must not be `None`") # noqa: E501
self._launch_link = launch_link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LaunchLinkSchema, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LaunchLinkSchema):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py
|
1a5709c49604d7fad74c928f5495cdfc40928877
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
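# Shape sanity check (sketch, not part of the original module): the layer sizes
# imply MNIST-style 1x28x28 inputs, since two 3x3 convolutions followed by 2x2
# max pooling give 64 * 12 * 12 = 9216 features, matching fc1.
#   net = Net()
#   out = net(torch.randn(8, 1, 28, 28))   # -> (8, 10) log-probabilities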
|
py
|
1a570a50db7e13c542b8b3bea9b390c37ca5e6ab
|
import os
import sys
import psutil
from monk.pytorch_prototype import prototype
from monk.compare_prototype import compare
from monk.pip_unit_tests.pytorch.common import print_start
from monk.pip_unit_tests.pytorch.common import print_status
import torch
import numpy as np
from monk.pytorch.losses.return_loss import load_loss
def test_block_inception_c(system_dict):
forward = True;
test = "test_block_inception_c";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.inception_c_block(channels_7x7=3, pool_type="avg"));
network.append(gtf.inception_c_block(channels_7x7=3, pool_type="max"));
gtf.Compile_Network(network, data_shape=(1, 64, 64), use_gpu=False);
x = torch.randn(1, 1, 64, 64);
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
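# Illustrative driver (sketch, not part of the original test suite); the keys of
# `system_dict` are inferred from their use above:
#   system_dict = {"total_tests": 0, "successful_tests": 0,
#                  "failed_tests_exceptions": [], "failed_tests_lists": [],
#                  "skipped_tests_lists": []}
#   system_dict = test_block_inception_c(system_dict)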
|
py
|
1a570d4a334fef732efc4018f5034f967e099077
|
import asyncio
import logging
import time
from typing import Iterable
import uvicorn
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from async_batch.batch_processor import BatchProcessor, TaskQueue
log = logging.getLogger(__file__)
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"()": "uvicorn.logging.DefaultFormatter",
"fmt": "%(asctime)-15s %(levelprefix)s %(message)s",
"use_colors": None,
},
"access": {
"()": "uvicorn.logging.AccessFormatter",
"fmt": '%(asctime)-15s %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
"access": {
"formatter": "access",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
},
"loggers": {
"": {"handlers": ["default"], "level": "DEBUG"},
"uvicorn.error": {"level": "DEBUG"},
"uvicorn.access": {"handlers": ["access"], "level": "DEBUG", "propagate": False},
},
}
class ExampleBatchProcessor(BatchProcessor):
def _process(self, batch_data: Iterable[int]) -> Iterable[int]:
return [x ** 2 for x in batch_data]
def __init__(self, batch_size: int):
self.batch_size = batch_size
def get_batch_size(self) -> int:
return self.batch_size
tq = TaskQueue(
batch_processor=ExampleBatchProcessor(2),
max_wait_time=3
)
app = FastAPI(
title="Async Batcher Example Project",
version="0.1",
description="Async Batch Project",
)
app.task_queue = tq
@app.on_event("startup")
async def start_task_queue():
log.info("Starting Task Queue")
app.task_queue.start()
@app.on_event("shutdown")
async def stop_task_queue():
log.info("Stopping Task Queue")
app.task_queue.stop()
@app.get("/")
async def read_root():
"""
Redirect to the interactive API documentation.
"""
return RedirectResponse("docs")
@app.post("/set/interval")
async def set_interval(interval: float):
"""
Update the task queue's wait interval.
"""
app.task_queue._interval = interval
return {"status": "success"}
@app.post("/set/batch_size")
async def set_batch_size(batch_size: int):
"""
Update the batch size used by the batch processor.
"""
app.task_queue._batch_processor.batch_size = batch_size
return {"status": "success"}
@app.get("/test")
async def api_test(number: int):
log.info(f"Request come in with number={number}")
if not app.task_queue.is_alive():
if not app.task_queue.stop():
app.task_queue.start()
start_time = time.time()
data = await asyncio.wait_for(
app.task_queue.async_submit(number),
timeout=app.task_queue._interval + 1.0
)
spent = time.time() - start_time
return {
"status": "success",
"result": data,
"used_time": spent
}
if __name__ == '__main__':
uvicorn.run(app, log_config=LOGGING_CONFIG)
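# Illustrative client calls once the server is running (uvicorn defaults to
# http://127.0.0.1:8000); a sketch using the third-party `requests` package,
# which this module does not import:
#   import requests
#   requests.post("http://127.0.0.1:8000/set/batch_size", params={"batch_size": 4})
#   r = requests.get("http://127.0.0.1:8000/test", params={"number": 3})
#   r.json()   # -> {"status": "success", "result": 9, "used_time": ...}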
|
py
|
1a570e48e9d329f52891965f088cc09bb3bba822
|
"""Registers functions to be called if an exception or signal occurs."""
import functools
import logging
import signal
import traceback
# pylint: disable=unused-import, no-name-in-module
from acme.magic_typing import Any, Callable, Dict, List, Union
# pylint: enable=unused-import, no-name-in-module
from certbot import errors
from certbot.compat import os
logger = logging.getLogger(__name__)
# _SIGNALS stores the signals that will be handled by the ErrorHandler. These
# signals were chosen as their default handler terminates the process and could
# potentially occur from inside Python. Signals such as SIGILL were not
# included as they could be a sign of something devious and we should terminate
# immediately.
if os.name != "nt":
_SIGNALS = [signal.SIGTERM]
for signal_code in [signal.SIGHUP, signal.SIGQUIT,
signal.SIGXCPU, signal.SIGXFSZ]:
# Adding only those signals that their default action is not Ignore.
# This is platform-dependent, so we check it dynamically.
if signal.getsignal(signal_code) != signal.SIG_IGN:
_SIGNALS.append(signal_code)
else:
# POSIX signals are not implemented natively on Windows, but are emulated by the C runtime.
# As consumed by CPython, most handlers on these signals are useless, in particular
# SIGTERM: for instance, os.kill(pid, signal.SIGTERM) calls TerminateProcess, which stops
# the process immediately without calling the attached handler. Besides, the non-POSIX signals
# (CTRL_C_EVENT and CTRL_BREAK_EVENT) are implemented in a console context to deliver the
# CTRL+C event to a process launched from the console. In fact only CTRL_C_EVENT behaves
# reliably, and it maps to the SIGINT handler. However, in that case a KeyboardInterrupt
# is raised, which is handled by ErrorHandler through the context manager protocol.
# In short, no signal on Windows is eligible to be handled using ErrorHandler.
#
# Refs: https://stackoverflow.com/a/35792192, https://maruel.ca/post/python_windows_signal,
# https://docs.python.org/2/library/os.html#os.kill,
# https://www.reddit.com/r/Python/comments/1dsblt/windows_command_line_automation_ctrlc_question
_SIGNALS = []
class ErrorHandler(object):
"""Context manager for running code that must be cleaned up on failure.
The context manager allows you to register functions that will be called
when an exception (excluding SystemExit) or signal is encountered.
Usage::
handler = ErrorHandler(cleanup1_func, *cleanup1_args, **cleanup1_kwargs)
handler.register(cleanup2_func, *cleanup2_args, **cleanup2_kwargs)
with handler:
do_something()
Or for one cleanup function::
with ErrorHandler(func, args, kwargs):
do_something()
If an exception is raised out of do_something, the cleanup functions will
be called in last in first out order. Then the exception is raised.
Similarly, if a signal is encountered, the cleanup functions are called
followed by the previously received signal handler.
Each registered cleanup function is called exactly once. If a registered
function raises an exception, it is logged and the next function is called.
Signals received while the registered functions are executing are
deferred until they finish.
"""
def __init__(self, func, *args, **kwargs):
self.call_on_regular_exit = False
self.body_executed = False
self.funcs = [] # type: List[Callable[[], Any]]
self.prev_handlers = {} # type: Dict[int, Union[int, None, Callable]]
self.received_signals = [] # type: List[int]
if func is not None:
self.register(func, *args, **kwargs)
def __enter__(self):
self.body_executed = False
self._set_signal_handlers()
def __exit__(self, exec_type, exec_value, trace):
self.body_executed = True
retval = False
# SystemExit is ignored to properly handle forks that don't exec
if exec_type is SystemExit:
return retval
elif exec_type is None:
if not self.call_on_regular_exit:
return retval
elif exec_type is errors.SignalExit:
logger.debug("Encountered signals: %s", self.received_signals)
retval = True
else:
logger.debug("Encountered exception:\n%s", "".join(
traceback.format_exception(exec_type, exec_value, trace)))
self._call_registered()
self._reset_signal_handlers()
self._call_signals()
return retval
def register(self, func, *args, **kwargs):
# type: (Callable, *Any, **Any) -> None
"""Sets func to be run with the given arguments during cleanup.
:param function func: function to be called in case of an error
"""
self.funcs.append(functools.partial(func, *args, **kwargs))
def _call_registered(self):
"""Calls all registered functions"""
logger.debug("Calling registered functions")
while self.funcs:
try:
self.funcs[-1]()
except Exception: # pylint: disable=broad-except
logger.error("Encountered exception during recovery: ", exc_info=True)
self.funcs.pop()
def _set_signal_handlers(self):
"""Sets signal handlers for signals in _SIGNALS."""
for signum in _SIGNALS:
prev_handler = signal.getsignal(signum)
# If prev_handler is None, the handler was set outside of Python
if prev_handler is not None:
self.prev_handlers[signum] = prev_handler
signal.signal(signum, self._signal_handler)
def _reset_signal_handlers(self):
"""Resets signal handlers for signals in _SIGNALS."""
for signum in self.prev_handlers:
signal.signal(signum, self.prev_handlers[signum])
self.prev_handlers.clear()
def _signal_handler(self, signum, unused_frame):
"""Replacement function for handling received signals.
Store the received signal. If we are executing the code block in
the body of the context manager, stop by raising signal exit.
:param int signum: number of current signal
"""
self.received_signals.append(signum)
if not self.body_executed:
raise errors.SignalExit
def _call_signals(self):
"""Finally call the deferred signals."""
for signum in self.received_signals:
logger.debug("Calling signal %s", signum)
os.kill(os.getpid(), signum)
class ExitHandler(ErrorHandler):
"""Context manager for running code that must be cleaned up.
Subclass of ErrorHandler, with the same usage and parameters.
In addition to cleaning up on all signals, also cleans up on
regular exit.
"""
def __init__(self, func, *args, **kwargs):
ErrorHandler.__init__(self, func, *args, **kwargs)
self.call_on_regular_exit = True
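# Illustrative usage (sketch; `cleanup` and `do_work` are hypothetical callables):
# unlike ErrorHandler, the registered function also runs on a normal exit.
#   with ExitHandler(cleanup):
#       do_work()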
|
py
|
1a570e76663eb36f9ec9802be56e156d827958fb
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import gc
import importlib.util
import inspect
import os
from pathlib import Path
import shlex
import subprocess
import sys
import sphinx_gallery
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path.resolve() / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "vta" / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# -- General configuration ------------------------------------------------
locale_dirs = ["translates/locales/"]
gettext_compact = False
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2021, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
"""Get git describe version."""
ver_py = tvm_path.joinpath("version.py")
libver = {"__file__": ver_py}
exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
_, gd_version = libver["git_describe_version"]()
if gd_version != original_version:
print("Use git describe based version %s" % gd_version)
return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy-1.8.0/html-scipyorg/", None),
"matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [
tvm_path.joinpath("gallery", "tutorial"),
tvm_path.joinpath("gallery", "how_to", "compile_models"),
tvm_path.joinpath("gallery", "how_to", "deploy_models"),
tvm_path.joinpath("gallery", "how_to", "work_with_relay"),
tvm_path.joinpath("gallery", "how_to", "work_with_schedules"),
tvm_path.joinpath("gallery", "how_to", "optimize_operators"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autotvm"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"),
tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"),
tvm_path.joinpath("gallery", "how_to", "extend_tvm"),
tvm_path.joinpath("vta", "tutorials"),
]
gallery_dirs = [
"tutorial",
"how_to/compile_models",
"how_to/deploy_models",
"how_to/work_with_relay",
"how_to/work_with_schedules",
"how_to/optimize_operators",
"how_to/tune_with_autotvm",
"how_to/tune_with_autoscheduler",
"how_to/work_with_microtvm",
"how_to/extend_tvm",
"topic/vta/tutorials",
]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
"tutorial": [
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
"tvmc_python.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
"auto_scheduler_matmul_x86.py",
"tensor_ir_blitz_course.py",
"topi.pi",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
],
"compile_models": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
"from_paddle.py",
],
"work_with_schedules": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize_operators": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"tune_with_autotvm": [
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"tune_with_autoscheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"extend_tvm": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
],
"micro": [
"micro_autotune.py",
"micro_reference_vm.py",
"micro_tflite.py",
"micro_ethosu.py",
"micro_tvmc.py",
],
}
class WithinSubsectionOrder:
def __init__(self, src_dir):
self.src_dir = src_dir.split("/")[-1]
def __call__(self, filename):
# If the order is provided, use the provided order
if (
self.src_dir in within_subsection_order
and filename in within_subsection_order[self.src_dir]
):
index = within_subsection_order[self.src_dir].index(filename)
assert index < 1e10
return "\0%010d" % index
# Otherwise, sort by filename
return filename
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
gc.collect()
sphinx_gallery_conf = {
"backreferences_dir": "gen_modules/backreferences",
"doc_module": ("tvm", "numpy"),
"reference_url": {
"tvm": None,
"matplotlib": "https://matplotlib.org/",
"numpy": "https://numpy.org/doc/stable",
},
"examples_dirs": examples_dirs,
"within_subsection_order": WithinSubsectionOrder,
"gallery_dirs": gallery_dirs,
"subsection_order": subsection_order,
"filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", ".py"),
"find_mayavi_figures": False,
"download_all_examples": False,
"min_reported_time": 60,
"expected_failing_examples": [],
"reset_modules": ("matplotlib", "seaborn", force_gc),
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2020 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
Copyright © 2020 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
"""Update the docstring of alias functions.
This function checks if the obj is an alias of another documented object
in a different module.
If it is an alias, then it will append the alias information to the docstring.
Parameters
----------
name : str
The full name of the object in the doc.
obj : object
The original object.
lines : list
The docstring lines, need to be modified inplace.
"""
arr = name.rsplit(".", 1)
if len(arr) != 2:
return
target_mod, target_name = arr
if target_mod not in tvm_alias_check_map:
return
if not hasattr(obj, "__module__"):
return
obj_mod = obj.__module__
for amod in tvm_alias_check_map[target_mod]:
if not obj_mod.startswith(amod):
continue
if hasattr(sys.modules[amod], target_name):
obj_type = ":py:func" if callable(obj) else ":py:class"
lines.append(".. rubric:: Alias of %s:`%s.%s`" % (obj_type, amod, target_name))
def process_docstring(app, what, name, obj, options, lines):
"""Sphinx callback to process docstring"""
if callable(obj) or inspect.isclass(obj):
update_alias_docstring(name, obj, lines)
from legacy_redirect import build_legacy_redirect
def setup(app):
app.connect("autodoc-process-docstring", process_docstring)
app.connect("build-finished", build_legacy_redirect(tvm_path))
|
py
|
1a570f6df3c3710b3a9966b283d917a6a1b6e051
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenAuthAppCancelModel(object):
def __init__(self):
self._auth_app_id = None
self._auth_scene = None
self._operator_user_id = None
@property
def auth_app_id(self):
return self._auth_app_id
@auth_app_id.setter
def auth_app_id(self, value):
self._auth_app_id = value
@property
def auth_scene(self):
return self._auth_scene
@auth_scene.setter
def auth_scene(self, value):
self._auth_scene = value
@property
def operator_user_id(self):
return self._operator_user_id
@operator_user_id.setter
def operator_user_id(self, value):
self._operator_user_id = value
def to_alipay_dict(self):
params = dict()
if self.auth_app_id:
if hasattr(self.auth_app_id, 'to_alipay_dict'):
params['auth_app_id'] = self.auth_app_id.to_alipay_dict()
else:
params['auth_app_id'] = self.auth_app_id
if self.auth_scene:
if hasattr(self.auth_scene, 'to_alipay_dict'):
params['auth_scene'] = self.auth_scene.to_alipay_dict()
else:
params['auth_scene'] = self.auth_scene
if self.operator_user_id:
if hasattr(self.operator_user_id, 'to_alipay_dict'):
params['operator_user_id'] = self.operator_user_id.to_alipay_dict()
else:
params['operator_user_id'] = self.operator_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenAuthAppCancelModel()
if 'auth_app_id' in d:
o.auth_app_id = d['auth_app_id']
if 'auth_scene' in d:
o.auth_scene = d['auth_scene']
if 'operator_user_id' in d:
o.operator_user_id = d['operator_user_id']
return o
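# Illustrative round-trip (sketch; the app id value is a placeholder):
#   m = AlipayOpenAuthAppCancelModel()
#   m.auth_app_id = '2019000000000000'
#   d = m.to_alipay_dict()                                # {'auth_app_id': '2019000000000000'}
#   m2 = AlipayOpenAuthAppCancelModel.from_alipay_dict(d)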
|
py
|
1a5712af5749f6fe2d031db2c05a822292bd784c
|
# -*- coding: utf-8 -*-
import click
import json
from ..utils.spinner import (
init_spinner,
start_spinner,
stop_spinner,
)
from ..utils.print import (
tbprint,
eprint,
oprint,
opprint,
)
@click.group()
@click.pass_obj
@click.pass_context
def command_runner(ctx, obj):
"""DNA Center Command Runner API (version: 1.3.3).
Wraps the DNA Center Command Runner API and exposes the API as native Python commands.
"""
ctx.obj = obj.command_runner
@command_runner.command()
@click.option('--headers', type=str, help='''Dictionary of HTTP Headers to send with the Request.''',
default=None,
show_default=True)
@click.option('-pp', '--pretty_print', type=int, help='''Pretty print indent''',
default=None,
show_default=True)
@click.option('--beep', is_flag=True, help='''Spinner beep (on)''')
@click.pass_obj
def get_all_keywords_of_clis_accepted(obj, pretty_print, beep,
headers):
"""Get valid keywords.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
result = obj.get_all_keywords_of_clis_accepted(
headers=headers)
stop_spinner(spinner)
opprint(result, indent=pretty_print)
except Exception as e:
stop_spinner(spinner)
tbprint()
eprint('Error:', e)
click.Context.exit(-1)
@command_runner.command()
@click.option('--commands', type=str, multiple=True,
help='''CommandRunnerDTO's commands (list of strings).''',
default=None,
show_default=True)
@click.option('--description', type=str,
help='''CommandRunnerDTO's description.''',
default=None,
show_default=True)
@click.option('--deviceuuids', type=str, multiple=True,
help='''CommandRunnerDTO's deviceUuids (list of strings).''',
default=None,
show_default=True)
@click.option('--name', type=str,
help='''CommandRunnerDTO's name.''',
default=None,
show_default=True)
@click.option('--timeout', type=int,
help='''CommandRunnerDTO's timeout.''',
default=None,
show_default=True)
@click.option('--headers', type=str, help='''Dictionary of HTTP Headers to send with the Request.''',
default=None,
show_default=True)
@click.option('--payload', type=str, help='''A JSON serializable Python object to send in the body of the Request.''',
default=None,
show_default=True)
@click.option('--active_validation', type=bool, help='''Enable/Disable payload validation.''',
default=True,
show_default=True)
@click.option('-pp', '--pretty_print', type=int, help='''Pretty print indent''',
default=None,
show_default=True)
@click.option('--beep', is_flag=True, help='''Spinner beep (on)''')
@click.pass_obj
def run_read_only_commands_on_devices(obj, pretty_print, beep,
commands,
description,
deviceuuids,
name,
timeout,
headers,
payload,
active_validation):
"""Submit request for read-only CLIs.
"""
spinner = init_spinner(beep=beep)
start_spinner(spinner)
try:
if headers is not None:
headers = json.loads(headers)
if payload is not None:
payload = json.loads(payload)
commands = list(commands)
commands = commands if len(commands) > 0 else None
deviceuuids = list(deviceuuids)
deviceuuids = deviceuuids if len(deviceuuids) > 0 else None
result = obj.run_read_only_commands_on_devices(
commands=commands,
description=description,
deviceUuids=deviceuuids,
name=name,
timeout=timeout,
headers=headers,
payload=payload,
active_validation=active_validation)
stop_spinner(spinner)
opprint(result, indent=pretty_print)
except Exception as e:
stop_spinner(spinner)
tbprint()
eprint('Error:', e)
click.Context.exit(-1)
|
py
|
1a5713eb6333e87c12f00fec4c3969f5286e2d92
|
name = 'network_scanner'
|
py
|
1a5713eee5a615af79a0ef67fce70c4a991e68a7
|
# Copyright (C) 2021, RTE (http://www.rte-france.com)
# SPDX-License-Identifier: Apache-2.0
from vm_manager.vm_manager import (
list_vms,
start,
stop,
create,
clone,
remove,
enable_vm,
disable_vm,
is_enabled,
status,
create_snapshot,
remove_snapshot,
list_snapshots,
purge_image,
rollback_snapshot,
list_metadata,
get_metadata,
set_metadata,
)
|
py
|
1a5714b666b1cbe5126bbc30b56395e0611fa848
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from django.utils.translation import ugettext as _
from notebook.connectors.dataeng import DataEng, DATE_FORMAT
from jobbrowser.apis.base_api import Api
LOG = logging.getLogger(__name__)
class DataEngClusterApi(Api):
def apps(self, filters):
api = DataEng(self.user)
jobs = api.list_clusters()
return {
'apps': [{
'id': app['crn'],
'name': '%(clusterName)s' % app,
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': '%(serviceType)s %(workersGroupSize)s %(instanceType)s %(cdhVersion)s' % app,
'user': app['clusterName'].split('-', 1)[0],
'progress': 100,
'queue': 'group',
'duration': 1,
'submitted': app['creationDate']
} for app in jobs['clusters']],
'total': len(jobs)
}
def app(self, appid):
return {}
def action(self, appid, action):
return {}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'TERMINATING']:
return 'RUNNING'
elif status in ['ARCHIVING', 'COMPLETED']:
return 'SUCCEEDED'
else:
return 'FAILED' # KILLED and FAILED
class DataEngJobApi(Api):
def apps(self, filters):
kwargs = {}
if 'time' in filters:
if filters['time']['time_unit'] == 'minutes':
delta = timedelta(minutes=int(filters['time']['time_value']))
elif filters['time']['time_unit'] == 'hours':
delta = timedelta(hours=int(filters['time']['time_value']))
else:
delta = timedelta(days=int(filters['time']['time_value']))
kwargs['creation_date_after'] = (datetime.today() - delta).strftime(DATE_FORMAT)
# Todo: filter on 'cluster_crn'
api = DataEng(self.user)
jobs = api.list_jobs(**kwargs)
return {
'apps': [{
'id': app['jobId'],
'name': app['creationDate'],
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': app['jobType'],
'user': '',
'progress': 100,
'duration': 10 * 3600,
'submitted': app['creationDate']
} for app in jobs['jobs']],
'total': len(jobs)
}
def app(self, appid):
handle = DataEng(self.user).describe_job(job_id=appid)
job = handle['job']
common = {
'id': job['jobId'],
'name': job['jobId'],
'status': job['status'],
'apiStatus': self._api_status(job['status']),
'progress': 50,
'duration': 10 * 3600,
'submitted': job['creationDate'],
'type': 'dataeng-job-%s' % job['jobType'],
}
common['properties'] = {
'properties': job
}
return common
def action(self, appid, action):
return {}
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, appid, app_type, app_property):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'TERMINATING']:
return 'RUNNING'
elif status in ['COMPLETED']:
return 'SUCCEEDED'
else:
return 'FAILED' # INTERRUPTED , KILLED, TERMINATED and FAILED
|
py
|
1a57163a8de9cbcd6cc9d4e18bd380cecef4e533
|
#
# Copyright (C) 2010-2017 Samuel Abels
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Logging utilities.
"""
from .. import FileLogger
from .impl import add_label
_loggers = []
def log_to(logger):
"""
Wraps a function that has a connection passed such that everything that
happens on the connection is logged using the given logger.
:type logger: Logger
:param logger: The logger that handles the logging.
"""
logger_id = id(logger)
def decorator(function):
return add_label(function, 'log_to', logger_id=logger_id)
return decorator
def log_to_file(logdir, mode='a', delete=False, clearmem=True):
"""
Like :class:`log_to()`, but automatically creates a new FileLogger
instead of having one passed.
Note that the logger stays alive (in memory) forever. If you need
to control the lifetime of a logger, use :class:`log_to()` instead.
"""
logger = FileLogger(logdir, mode, delete, clearmem)
_loggers.append(logger)
return log_to(logger)
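# Illustrative usage (sketch): the decorated callback's signature and the
# `conn.execute` call follow the surrounding framework's usual conventions
# and are assumptions here.
#   @log_to_file('/tmp/logs')
#   def do_something(job, host, conn):
#       conn.execute('show version')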
|
py
|
1a57179ac0c618ccf46bbf527fd453b83c6d94fc
|
# Import the modules
import cv2
import numpy as np
import os
import imutils
from imutils.video import WebcamVideoStream
DEBUG = True
# Custom functions
# define rotate function
def rotate(image, angle, center=None, scale=1.0):
# get image size
(h, w) = image.shape[:2]
# if no center is given, use the image center
if center is None:
center = (w / 2, h / 2)
# Do rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
# return rotated image
return rotated
# Load the model
os.chdir(os.path.dirname(__file__))
net = cv2.dnn.readNet('../model/inference_graph.xml', '../model/inference_graph.bin')
# Specify target device
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
# Read the Camera
vs = WebcamVideoStream(src=0).start()
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=600)
frame = rotate(frame, 180)
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image (THRESH_BINARY_INV inverts black and white)
ret, im_th = cv2.threshold(im_gray, 127, 255, cv2.THRESH_BINARY_INV)
if DEBUG: im_th_display = im_th.copy()
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# For each rectangular region, use mnist cnn model to predict.
for rect in rects:
# rect is (x, y, w, h), e.g. (0, 0, 600, 450)
# skip rects that are too small, too big, or have a bad w:h ratio
if(rect[2]*rect[3] < 60 or rect[2]*rect[3] > 20000 or rect[2]>rect[3]*10):
if DEBUG: print('info:{}, IGNORE'.format(rect))
break
else:
if DEBUG: print('info:{}, DISPLAY'.format(rect))
else: pass
cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
# Draw the rectangles
if DEBUG: cv2.rectangle(im_th_display, (pt2, pt1), (pt2+leng, pt1+leng), (255, 255, 255), 3)
# Prevent error: (-215 Assertion failed) !ssize.empty() in function 'resize'
if(roi.size == 0): break
# Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
# Dilate the digit strokes
roi = cv2.dilate(roi, (3, 3))
# Inference
blob = cv2.dnn.blobFromImage(roi, size=(28, 28), ddepth=cv2.CV_32F)
net.setInput(blob)
out = net.forward()
if out[0][int(np.argmax(out[0]))] > 0.5:
# cv2.putText(image, text, origin, font, scale, color, thickness, line type)
#if DEBUG: cv2.putText(im_th_display, str(np.argmax(out[0])), (rect[0], rect[1]), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 3)
cv2.putText(frame, str(np.argmax(out[0])), (rect[0], rect[1]), cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 3)
if DEBUG: cv2.imshow("Debug", im_th_display)
cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # press 'q' to leave the loop so the cleanup below is actually reached
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
py
|
1a57180823434daacbcd40e76141c3bc055b2599
|
"""
Functionality for reading game log data and representing it in an object-oriented fashion.
"""
__author__ = "Todd Shore <[email protected]>"
__copyright__ = "Copyright 2017 Todd Shore"
__license__ = "Apache License, Version 2.0"
import sys
from typing import Mapping
import pandas as pd
from . import session_data as sd
class EventData(object):
def __init__(self, events: pd.DataFrame, source_participant_ids: Mapping[str, str], initial_instructor_id: str):
self.events = events
self.source_participant_ids = source_participant_ids
self.initial_instructor_id = initial_instructor_id
def __eq__(self, other):
return (self is other or (isinstance(other, type(self))
and self.__key == other.__key))
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return self.__class__.__name__ + str(self.__dict__)
@property
def __key(self):
return self.initial_instructor_id, self.source_participant_ids, self.events
def read_events(session_data: sd.SessionData) -> EventData:
participant_metadata = session_data.read_participant_metadata()
participant_source_ids = participant_metadata[sd.ParticipantMetadataRow.SOURCE_ID.value]
interned_source_participant_ids = dict(
(sys.intern(source_id), sys.intern(participant_id)) for (participant_id, source_id) in
participant_source_ids.items())
session_metadata = session_data.read_session_metadata()
initial_instructor_id = sys.intern(session_metadata[sd.EventMetadataRow.INITIAL_INSTRUCTOR_ID.value])
event_df = session_data.read_events()
return EventData(event_df, interned_source_participant_ids, initial_instructor_id)
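# Illustrative usage sketch (an assumption, not part of the original module),
# given a session_data.SessionData instance obtained elsewhere:
#   event_data = read_events(session)
#   print(event_data.initial_instructor_id, len(event_data.events))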
|
py
|
1a57180897258f648145d20affacf9d7d2c95a83
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Dropout
import tensorflow.keras as keras
from zoo.automl.model.abstract import BaseModel
from zoo.automl.common.util import *
from zoo.automl.common.metrics import Evaluator
class VanillaLSTM(BaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
"""
Constructor of Vanilla LSTM model
"""
self.model = None
self.check_optional_config = check_optional_config
self.future_seq_len = future_seq_len
self.feature_num = None
self.metric = None
self.batch_size = None
self.loss = None
def _get_dropout(self, input_tensor, p=0.5, mc=False):
if mc:
return Dropout(p)(input_tensor, training=True)
else:
return Dropout(p)(input_tensor)
def _build(self, mc=False, **config):
"""
build vanilla LSTM model
:param config: model hyper parameters
:return: self
"""
super()._check_config(**config)
self.metric = config.get('metric', 'mean_squared_error')
self.batch_size = config.get('batch_size', 1024)
self.feature_num = config["feature_num"]
self.loss = config.get("loss", "mse")
inp = Input(shape=(None, self.feature_num))
lstm_1 = LSTM(units=config.get('lstm_1_units', 20),
return_sequences=True)(inp)
dropout_1 = self._get_dropout(lstm_1,
p=config.get('dropout_1', 0.2),
mc=mc)
lstm_2 = LSTM(units=config.get('lstm_2_units', 10),
return_sequences=False)(dropout_1)
dropout_2 = self._get_dropout(lstm_2,
p=config.get('dropout_2', 0.2),
mc=mc)
out = Dense(self.future_seq_len)(dropout_2)
self.model = Model(inputs=inp, outputs=out)
self.model.compile(loss=self.loss,
metrics=[self.metric],
optimizer=keras.optimizers.RMSprop(lr=config.get('lr', 0.001)))
return self.model
def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, **config):
"""
fit for one iteration
        :param x: 3-d array of shape (no. of samples, past sequence length, 2 + feature length);
        in the last dimension, the 1st column is the time index (data type needs to be a numpy
        datetime type, e.g. "datetime64") and the 2nd column is the target value (numeric)
:param y: 2-d numpy array in format (no. of samples, future sequence length)
if future sequence length > 1, or 1-d numpy array in format (no. of samples, )
if future sequence length = 1
:param validation_data: tuple in format (x_test,y_test), data used for validation.
If this is specified, validation result will be the optimization target for automl.
Otherwise, train metric will be the optimization target.
:param config: optimization hyper parameters
:return: the resulting metric
"""
config.update({"feature_num": x.shape[2]})
        # if the model is not initialized yet, call _build first
if self.model is None:
self._build(mc=mc, **config)
hist = self.model.fit(x, y,
validation_data=validation_data,
batch_size=self.batch_size,
epochs=config.get("epochs", 10),
verbose=verbose
)
# print(hist.history)
if validation_data is None:
# get train metrics
# results = self.model.evaluate(x, y)
result = hist.history.get(self.metric)[0]
else:
result = hist.history.get('val_' + str(self.metric))[0]
return result
def evaluate(self, x, y, metric=['mse']):
"""
Evaluate on x, y
:param x: input
:param y: target
:param metric: a list of metrics in string format
:return: a list of metric evaluation results
"""
y_pred = self.predict(x)
return [Evaluator.evaluate(m, y, y_pred) for m in metric]
def predict(self, x, mc=False):
"""
Prediction on x.
:param x: input
:return: predicted y
"""
return self.model.predict(x)
def predict_with_uncertainty(self, x, n_iter=100):
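        # Monte-Carlo dropout: run n_iter stochastic forward passes and report the
        # per-sample mean and standard deviation. The spread is only meaningful when
        # the network was built with mc=True, so that Dropout stays active at
        # prediction time; otherwise all passes are identical and the std is zero.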
result = np.zeros((n_iter,) + (x.shape[0], self.future_seq_len))
for i in range(n_iter):
result[i, :, :] = self.predict(x)
prediction = result.mean(axis=0)
uncertainty = result.std(axis=0)
return prediction, uncertainty
def save(self, model_path, config_path):
"""
save model to file.
:param model_path: the model file.
:param config_path: the config file
:return:
"""
self.model.save(model_path)
# os.rename("vanilla_lstm_tmp.h5", model_path)
config_to_save = {
# "future_seq_len": self.future_seq_len,
"metric": self.metric,
"batch_size": self.batch_size
}
save_config(config_path, config_to_save)
def restore(self, model_path, **config):
"""
restore model from file
:param model_path: the model file
:param config: the trial config
:return: the restored model
"""
# self.model = None
# self._build(**config)
self.model = keras.models.load_model(model_path)
# self.model.load_weights(file_path)
# self.future_seq_len = config["future_seq_len"]
# for continuous training
self.metric = config["metric"]
self.batch_size = config["batch_size"]
def _get_required_parameters(self):
return {
"feature_num"
}
def _get_optional_parameters(self):
return {
'lstm_1_units',
'dropout_1',
'lstm_2_units',
'dropout_2',
'metric',
'lr',
'epochs',
'batch_size',
'loss'
}
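# Illustrative usage sketch (an assumption, not part of the original module):
#   import numpy as np
#   model = VanillaLSTM(future_seq_len=1)
#   x = np.random.rand(100, 50, 4)    # (samples, past sequence length, features)
#   y = np.random.rand(100)           # (samples,) since future_seq_len == 1
#   metric_value = model.fit_eval(x, y, epochs=2)
#   y_pred = model.predict(x)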
|
py
|
1a57186bd9b7e2b5cb6aab2c96191dc7a2992e0d
|
"""Support to interface with the Plex API."""
from functools import wraps
import json
import logging
import plexapi.exceptions
import requests.exceptions
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.helpers.network import is_internal_request
from .const import (
COMMON_PLAYERS,
CONF_SERVER_IDENTIFIER,
DISPATCHERS,
DOMAIN as PLEX_DOMAIN,
NAME_FORMAT,
PLEX_NEW_MP_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL,
PLEX_UPDATE_SENSOR_SIGNAL,
SERVERS,
)
from .media_browser import browse_media
_LOGGER = logging.getLogger(__name__)
def needs_session(func):
"""Ensure session is available for certain attributes."""
@wraps(func)
def get_session_attribute(self, *args):
if self.session is None:
return None
return func(self, *args)
return get_session_attribute
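# The decorator above is what lets the session-backed properties further down
# (media_title, media_duration, ...) simply return None instead of raising when
# no session is attached, e.g.:
#
#   @property
#   @needs_session
#   def media_title(self):
#       return self.session.media_title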
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Plex media_player from a config entry."""
server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
registry = await async_get_registry(hass)
@callback
def async_new_media_players(new_entities):
_async_add_entities(hass, registry, async_add_entities, server_id, new_entities)
unsub = async_dispatcher_connect(
hass, PLEX_NEW_MP_SIGNAL.format(server_id), async_new_media_players
)
hass.data[PLEX_DOMAIN][DISPATCHERS][server_id].append(unsub)
_LOGGER.debug("New entity listener created")
@callback
def _async_add_entities(hass, registry, async_add_entities, server_id, new_entities):
"""Set up Plex media_player entities."""
_LOGGER.debug("New entities: %s", new_entities)
entities = []
plexserver = hass.data[PLEX_DOMAIN][SERVERS][server_id]
for entity_params in new_entities:
plex_mp = PlexMediaPlayer(plexserver, **entity_params)
entities.append(plex_mp)
# Migration to per-server unique_ids
old_entity_id = registry.async_get_entity_id(
MP_DOMAIN, PLEX_DOMAIN, plex_mp.machine_identifier
)
if old_entity_id is not None:
new_unique_id = f"{server_id}:{plex_mp.machine_identifier}"
_LOGGER.debug(
"Migrating unique_id from [%s] to [%s]",
plex_mp.machine_identifier,
new_unique_id,
)
registry.async_update_entity(old_entity_id, new_unique_id=new_unique_id)
async_add_entities(entities, True)
class PlexMediaPlayer(MediaPlayerEntity):
"""Representation of a Plex device."""
def __init__(self, plex_server, device, player_source, session=None):
"""Initialize the Plex device."""
self.plex_server = plex_server
self.device = device
self.player_source = player_source
self.device_make = None
self.device_platform = None
self.device_product = None
self.device_title = None
self.device_version = None
self.machine_identifier = device.machineIdentifier
self.session_device = None
self._available = False
self._device_protocol_capabilities = None
self._name = None
self._previous_volume_level = 1 # Used in fake muting
self._state = STATE_IDLE
self._volume_level = 1 # since we can't retrieve remotely
self._volume_muted = False # since we can't retrieve remotely
# Initializes other attributes
self.session = session
async def async_added_to_hass(self):
"""Run when about to be added to hass."""
_LOGGER.debug("Added %s [%s]", self.entity_id, self.unique_id)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SIGNAL.format(self.unique_id),
self.async_refresh_media_player,
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
PLEX_UPDATE_MEDIA_PLAYER_SESSION_SIGNAL.format(self.unique_id),
self.async_update_from_websocket,
)
)
@callback
def async_refresh_media_player(self, device, session, source):
"""Set instance objects and trigger an entity state update."""
_LOGGER.debug("Refreshing %s [%s / %s]", self.entity_id, device, session)
self.device = device
self.session = session
if source:
self.player_source = source
self.async_schedule_update_ha_state(True)
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
@callback
def async_update_from_websocket(self, state):
"""Update the entity based on new websocket data."""
self.update_state(state)
self.async_write_ha_state()
async_dispatcher_send(
self.hass,
PLEX_UPDATE_SENSOR_SIGNAL.format(self.plex_server.machine_identifier),
)
def update(self):
"""Refresh key device data."""
if not self.session:
self.force_idle()
if not self.device:
self._available = False
return
self._available = True
try:
device_url = self.device.url("/")
except plexapi.exceptions.BadRequest:
device_url = "127.0.0.1"
if "127.0.0.1" in device_url:
self.device.proxyThroughServer()
self._device_protocol_capabilities = self.device.protocolCapabilities
for device in filter(None, [self.device, self.session_device]):
self.device_make = self.device_make or device.device
self.device_platform = self.device_platform or device.platform
self.device_product = self.device_product or device.product
self.device_title = self.device_title or device.title
self.device_version = self.device_version or device.version
name_parts = [self.device_product, self.device_title or self.device_platform]
if (self.device_product in COMMON_PLAYERS) and self.device_make:
# Add more context in name for likely duplicates
name_parts.append(self.device_make)
if self.username and self.username != self.plex_server.owner:
# Prepend username for shared/managed clients
name_parts.insert(0, self.username)
self._name = NAME_FORMAT.format(" - ".join(name_parts))
def force_idle(self):
"""Force client to idle."""
self._state = STATE_IDLE
if self.player_source == "session":
self.device = None
self.session_device = None
self._available = False
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self):
"""Return the id of this plex client."""
return f"{self.plex_server.machine_identifier}:{self.machine_identifier}"
@property
def session(self):
"""Return the active session for this player."""
return self._session
@session.setter
def session(self, session):
self._session = session
if session:
self.session_device = self.session.player
self.update_state(self.session.state)
else:
self._state = STATE_IDLE
@property
def available(self):
"""Return the availability of the client."""
return self._available
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
@needs_session
def username(self):
"""Return the username of the client owner."""
return self.session.username
@property
def state(self):
"""Return the state of the device."""
return self._state
def update_state(self, state):
"""Set the state of the device, handle session termination."""
if state == "playing":
self._state = STATE_PLAYING
elif state == "paused":
self._state = STATE_PAUSED
elif state == "stopped":
self.session = None
self.force_idle()
else:
self._state = STATE_IDLE
@property
def _is_player_active(self):
"""Report if the client is playing media."""
return self.state in [STATE_PLAYING, STATE_PAUSED]
@property
def _active_media_plexapi_type(self):
"""Get the active media type required by PlexAPI commands."""
if self.media_content_type is MEDIA_TYPE_MUSIC:
return "music"
return "video"
@property
@needs_session
def session_key(self):
"""Return current session key."""
return self.session.sessionKey
@property
@needs_session
def media_library_title(self):
"""Return the library name of playing media."""
return self.session.media_library_title
@property
@needs_session
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.session.media_content_id
@property
@needs_session
def media_content_type(self):
"""Return the content type of current playing media."""
return self.session.media_content_type
@property
@needs_session
def media_content_rating(self):
"""Return the content rating of current playing media."""
return self.session.media_content_rating
@property
@needs_session
def media_artist(self):
"""Return the artist of current playing media, music track only."""
return self.session.media_artist
@property
@needs_session
def media_album_name(self):
"""Return the album name of current playing media, music track only."""
return self.session.media_album_name
@property
@needs_session
def media_album_artist(self):
"""Return the album artist of current playing media, music only."""
return self.session.media_album_artist
@property
@needs_session
def media_track(self):
"""Return the track number of current playing media, music only."""
return self.session.media_track
@property
@needs_session
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_duration
@property
@needs_session
def media_position(self):
"""Return the duration of current playing media in seconds."""
return self.session.media_position
@property
@needs_session
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self.session.media_position_updated_at
@property
@needs_session
def media_image_url(self):
"""Return the image URL of current playing media."""
return self.session.media_image_url
@property
@needs_session
def media_summary(self):
"""Return the summary of current playing media."""
return self.session.media_summary
@property
@needs_session
def media_title(self):
"""Return the title of current playing media."""
return self.session.media_title
@property
@needs_session
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return self.session.media_season
@property
@needs_session
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self.session.media_series_title
@property
@needs_session
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self.session.media_episode
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.device and "playback" in self._device_protocol_capabilities:
return (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_STOP
| SUPPORT_SEEK
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_VOLUME_MUTE
| SUPPORT_BROWSE_MEDIA
)
return SUPPORT_BROWSE_MEDIA | SUPPORT_PLAY_MEDIA
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.setVolume(int(volume * 100), self._active_media_plexapi_type)
self._volume_level = volume # store since we can't retrieve
@property
def volume_level(self):
"""Return the volume level of the client (0..1)."""
if (
self._is_player_active
and self.device
and "playback" in self._device_protocol_capabilities
):
return self._volume_level
return None
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
if self._is_player_active and self.device:
return self._volume_muted
return None
def mute_volume(self, mute):
"""Mute the volume.
Since we can't actually mute, we'll:
- On mute, store volume and set volume to 0
- On unmute, set volume to previously stored volume
"""
if not (self.device and "playback" in self._device_protocol_capabilities):
return
self._volume_muted = mute
if mute:
self._previous_volume_level = self._volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._previous_volume_level)
def media_play(self):
"""Send play command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.play(self._active_media_plexapi_type)
def media_pause(self):
"""Send pause command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.pause(self._active_media_plexapi_type)
def media_stop(self):
"""Send stop command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.stop(self._active_media_plexapi_type)
def media_seek(self, position):
"""Send the seek command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.seekTo(position * 1000, self._active_media_plexapi_type)
def media_next_track(self):
"""Send next track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipNext(self._active_media_plexapi_type)
def media_previous_track(self):
"""Send previous track command."""
if self.device and "playback" in self._device_protocol_capabilities:
self.device.skipPrevious(self._active_media_plexapi_type)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if not (self.device and "playback" in self._device_protocol_capabilities):
_LOGGER.debug(
"Client is not currently accepting playback controls: %s", self.name
)
return
src = json.loads(media_id)
if isinstance(src, int):
src = {"plex_key": src}
playqueue_id = src.pop("playqueue_id", None)
if playqueue_id:
try:
playqueue = self.plex_server.get_playqueue(playqueue_id)
except plexapi.exceptions.NotFound as err:
raise HomeAssistantError(
f"PlayQueue '{playqueue_id}' could not be found"
) from err
else:
shuffle = src.pop("shuffle", 0)
media = self.plex_server.lookup_media(media_type, **src)
if media is None:
_LOGGER.error("Media could not be found: %s", media_id)
return
_LOGGER.debug("Attempting to play %s on %s", media, self.name)
playqueue = self.plex_server.create_playqueue(media, shuffle=shuffle)
try:
self.device.playMedia(playqueue)
except requests.exceptions.ConnectTimeout:
_LOGGER.error("Timed out playing on %s", self.name)
@property
def device_state_attributes(self):
"""Return the scene state attributes."""
attributes = {}
for attr in [
"media_content_rating",
"media_library_title",
"player_source",
"media_summary",
"username",
]:
value = getattr(self, attr, None)
if value:
attributes[attr] = value
return attributes
@property
def device_info(self):
"""Return a device description for device registry."""
if self.machine_identifier is None:
return None
return {
"identifiers": {(PLEX_DOMAIN, self.machine_identifier)},
"manufacturer": self.device_platform or "Plex",
"model": self.device_product or self.device_make,
"name": self.name,
"sw_version": self.device_version,
"via_device": (PLEX_DOMAIN, self.plex_server.machine_identifier),
}
async def async_browse_media(self, media_content_type=None, media_content_id=None):
"""Implement the websocket media browsing helper."""
is_internal = is_internal_request(self.hass)
return await self.hass.async_add_executor_job(
browse_media,
self,
is_internal,
media_content_type,
media_content_id,
)
async def async_get_browse_image(
self, media_content_type, media_content_id, media_image_id=None
):
"""Get media image from Plex server."""
image_url = self.plex_server.thumbnail_cache.get(media_content_id)
if image_url:
result = await self._async_fetch_image(image_url)
return result
return (None, None)
|
py
|
1a5718bd7559482e0640ec8a853643c3ef90bcf7
|
from django import template
register = template.Library()
@register.filter_function
def attr(obj, arg1):
"""
Use in templates:
{% load field_attrs %}
then, in a form field:
{{ form.phone|attr:"style=width:143px;background-color:yellow"|attr:"size=30" }}
"""
att, value = arg1.split("=")
obj.field.widget.attrs[att] = value
return obj
|
py
|
1a5718bef949ee0223b9ea61bc4e06269f53866f
|
# -*- coding: utf-8 -*-
import pymysql
from selenium import webdriver
# driver = webdriver.PhantomJS()
driver = webdriver.Chrome()
base_url = 'https://www.baidu.com'
driver.get(base_url)
driver.find_element_by_id('kw').send_keys('selenium')
driver.find_element_by_id('su').click()
|
py
|
1a5718f1e5e784a53854d403701e28d13f8b3461
|
# This section is to improve Python compatibility between py27 and py3
from __future__ import absolute_import
import sys
from . import ColorPy
sys.modules[__package__] = ColorPy
|
py
|
1a57191a45da12bc8aac2d306e361c5f2645d415
|
from random import choice, randrange
import uwsgi
import gevent
import gevent.queue
import gevent.event
import gevent.select
import redis
import math
class ArenaObject(object):
def __init__(self, x, y, r, speed=15):
self.height = 13.5
self.width = 13.5
self.scale = 5
self.x = x
self.y = y
self.r = r
self.speed = speed
def translate(self, amount):
amount_x = math.sin(self.r) * amount
amount_y = math.cos(self.r) * amount
self.x += round(amount_x)
self.y += round(amount_y)
#print('x:{} y:{} r:{}'.format(self.x, self.y, self.r))
def rotateR(self):
if self.r <= 0:
self.r = 2 * math.pi
else:
self.r -= 0.1
def rotateL(self):
if self.r >= 2 * math.pi:
self.r = 0
else:
self.r += 0.1
def collide(self, x, y, width, height):
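        # Axis-aligned bounding-box test: the two boxes overlap iff twice the distance
        # between their centres is smaller than the sum of their extents on both axes.
        # self.width/self.height are scaled by self.scale; the caller passes the other
        # box's extents directly.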
dx = abs(self.x - x) * 2
dw = self.width * self.scale + width
dy = abs(self.y - y) * 2
dh = self.height * self.scale + height
return (dx < dw) and (dy < dh)
class Bonus(object):
def __init__(self, game, id, x, y, type):
self.game = game
self.id = id
self.type = type
self.arena_object = ArenaObject(x, y, 0, 0)
self.game.active_bonus_malus.append(self)
self.game.broadcast('bm,{},{},{},{},{}'.format(
self.id, self.type, self.arena_object.x, 50, self.arena_object.y))
def activate_bonus(self, player):
self.game.active_bonus_malus.remove(self)
self.game.broadcast(
'bm,gv,{},{},{}'.format(self.id, self.type, player.name))
self.game.bonus_malus_spawn_points.append(
(self.arena_object.x, self.arena_object.y))
class TimerBonus(Bonus):
def __init__(self, game, id, x, y, type, time=15):
super(TimerBonus, self).__init__(game, id, x, y, type)
self.time = time
class BonusHaste(TimerBonus):
def __init__(self, game, id, x, y, type='haste'):
super(BonusHaste, self).__init__(game, id, x, y, type)
def activate_bonus(self, player):
super(BonusHaste, self).activate_bonus(player)
old_speed = player.arena_object.speed
player.arena_object.speed *= 2
gevent.sleep(self.time)
player.arena_object.speed = old_speed
self.game.broadcast('bm,rm,{},{}'.format(self.type, player.name))
class BonusPower(TimerBonus):
def __init__(self, game, id, x, y, type='power'):
super(BonusPower, self).__init__(game, id, x, y, type)
def activate_bonus(self, player):
super(BonusPower, self).activate_bonus(player)
old_damage = player.bullet.damage
player.bullet.damage *= 2
gevent.sleep(self.time)
player.bullet.damage = old_damage
self.game.broadcast('bm,rm,{},{}'.format(self.type, player.name))
class BonusHeal(Bonus):
def __init__(self, game, id, x, y, type='heal', amount=50.0):
super(BonusHeal, self).__init__(game, id, x, y, type)
self.amount = amount
def activate_bonus(self, player):
super(BonusHeal, self).activate_bonus(player)
player.energy = player.energy + self.amount if player.energy <= 50 else 100.0
class Arena(object):
def __init__(self, min_players=3, max_players=8, warmup=10):
self.greenlets = {
'engine': self.engine_start,
'start': self.start
}
self.posters = [
'posters/robbo.jpg',
'posters/raffo.jpg',
'posters/unbit.jpg',
'posters/20tab.jpg',
'posters/beri.jpg',
'posters/pycon.jpg'
]
self.animations = []
self.players = {}
self.waiting_players = []
self.min_players = min_players
self.max_players = max_players
self.warmup = warmup
self.started = False
self.finished = False
#self.warming_up = False
self.walls = (
#sc_x, sc_y, sc_z, x, y, z, r
(200, 100, 50, 0, 150, -1950, 0),
(200, 100, 50, -1950, 150, 0, -math.pi / 2),
(200, 100, 50, 1950, 150, 0, -math.pi / 2),
(200, 100, 50, 0, 150, 1950, 0),
( 50, 50, 30, -730, 150, -1200, 0),
( 50, 50, 30, 730, 150, -1200, 0),
( 50, 50, 30, -1200, 150, -730, -math.pi / 2),
( 50, 50, 30, -1200, 150, 730, -math.pi / 2),
( 50, 50, 30, 1200, 150, -730, -math.pi / 2),
( 50, 50, 30, 1200, 150, 730, -math.pi / 2),
( 50, 50, 30, -730, 150, 1200, 0),
( 50, 50, 30, 730, 150, 1200, 0),
)
self.spawn_points = (
# x, y, r
#( 0, 1650, math.pi),
#( 0, -1650, 0),
( -935, 935, 3 * math.pi / 4, 0x7777AA),
( 935, 935, 5 * math.pi / 4, 0x770000),
( 935, -935, 7 * math.pi / 4, 0x007700),
( -935, -935, math.pi / 4, 0x777700),
(-1650, 1650, 3 * math.pi / 4, 0xAA00AA),
#(-1650, 0, math.pi / 2),
#( 1650, 0, 3 * math.pi / 2),
( 1650, 1650, 5 * math.pi / 4, 0x007777),
( 1650, -1650, 7 * math.pi / 4, 0x000077),
(-1650, -1650, math.pi / 4, 0xFFAA77),
)
self.arena = "arena{}".format(uwsgi.worker_id())
self.redis = redis.StrictRedis()
self.channel = self.redis.pubsub()
self.channel.subscribe(self.arena)
self.bonus_malus = (
BonusHaste,
# BonusGiant,
BonusPower,
BonusHeal,
)
self.bonus_malus_spawn_points = [
( 0, 0),
( 0, 1650),
( 0, -1650),
(-1650, 0),
( 1650, 0),
]
self.active_bonus_malus = []
self.spawn_iterator = iter(self.spawn_points)
def broadcast(self, msg):
self.redis.publish(self.arena, 'arena:{}'.format(msg))
def msg_handler(self, player, msg):
p, cmd = msg.split(':')
try:
if cmd in ('at', 'AT'):
self.players[p].attack_cmd = cmd
else:
self.players[p].cmd = cmd
except KeyError:
            print 'Player {} does not exist or is dead'.format(p)
def attack_cmd_handler(self, player, cmd):
if cmd == 'AT':
player.bullet.shoot()
player.attack = 1
return True
elif cmd == 'at':
player.attack = 0
return False
def cmd_handler(self, player, cmd):
if cmd == 'rl':
player.arena_object.rotateL()
return True
elif cmd == 'rr':
player.arena_object.rotateR()
return True
elif cmd == 'fw':
old_x = player.arena_object.x
old_y = player.arena_object.y
player.arena_object.translate(player.arena_object.speed)
if not self.collision(player):
return True
player.arena_object.x = old_x
player.arena_object.y = old_y
# player.arena_object.translate(-player.arena_object.speed)
elif cmd == 'bw':
old_x = player.arena_object.x
old_y = player.arena_object.y
player.arena_object.translate(-player.arena_object.speed)
if not self.collision(player):
return True
player.arena_object.x = old_x
player.arena_object.y = old_y
# player.arena_object.translate(player.arena_object.speed)
return False
def collision(self, player):
for p in self.players.keys():
if self.players[p] == player:
continue
#check for body collision
if player.arena_object.collide(
self.players[p].arena_object.x,
self.players[p].arena_object.y,
self.players[p].arena_object.width * self.players[p].arena_object.scale,
self.players[p].arena_object.height * self.players[p].arena_object.scale
):
# if player.attack == 1:
# if self.players[p].attack == 0:
# self.players[p].damage(1.0, player.name)
# else:
# self.players[p].damage(1.0, player.name)
# elif self.players[p]. attack == 1:
# player.damage(1.0, 'himself')
# self.broadcast("collision between {} and {}".format(player.name, p))
return True
for wall in self.walls:
if wall[6] == 0:
height = 1 * wall[2]
width = 20 * wall[0]
else:
height = 20 * wall[0]
width = 1 * wall[2]
if player.arena_object.collide(wall[3], wall[5], width, height):
return True
for bm in self.active_bonus_malus:
if player.arena_object.collide(
bm.arena_object.x,
bm.arena_object.y,
bm.arena_object.width * bm.arena_object.scale,
bm.arena_object.height * bm.arena_object.scale,
):
gevent.spawn(bm.activate_bonus, player)
return False
def engine_start(self):
del self.greenlets['engine']
print('engine started')
while True:
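            # Each iteration is one game tick; the sleep at the bottom of the loop pads
            # the tick out to ~33.33 ms, i.e. roughly 30 updates per second.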
if (len(self.players) == 1 and self.started):
self.finished = True
self.winning_logic()
self.restart_game(11)
break
elif (len(self.players) == 0):
self.finished = True
self.restart_game()
break
t = uwsgi.micros() / 1000.0
for p in self.players.keys():
player = self.players[p]
if player.cmd:
draw = self.cmd_handler(player, player.cmd)
if draw:
player.update_gfx()
player.cmd = None
if player.attack_cmd:
draw = self.attack_cmd_handler(player, player.attack_cmd)
# print player.attack_cmd
if draw:
player.update_gfx()
player.attack_cmd = None
for animation in self.animations:
animation.animate()
t1 = uwsgi.micros() / 1000.0
delta = t1 - t
if delta < 33.33:
gevent.sleep((33.33 - delta) / 1000.0)
self.greenlets['engine'] = self.engine_start
print("engine ended")
def start(self):
del self.greenlets['start']
print("START!!")
#self.warming_up = True
while len(self.players) < self.min_players:
for p in self.players.keys():
self.players[p].update_gfx()
gevent.sleep(1)
if self.finished:
self.greenlets['start'] = self.start
print("ending")
return
warmup = self.warmup
while warmup > 0:
gevent.sleep(1.0)
self.broadcast("warmup,{} seconds to start".format(warmup))
warmup -= 1
#self.warmup = False
self.started = True
#gevent.spawn(self.engine_start)
gevent.sleep()
self.broadcast("FIGHT!!!")
gevent.sleep()
bm_counter = 0
while not self.finished:
gevent.sleep(10.0)
if len(self.bonus_malus_spawn_points) > 0:
coordinates = self.bonus_malus_spawn_points.pop(
randrange(len(self.bonus_malus_spawn_points)))
choice(self.bonus_malus)(self, bm_counter, *(coordinates))
bm_counter += 1
gevent.sleep(1.0)
self.broadcast("end")
self.started = False
self.greenlets['start'] = self.start
print("end")
gevent.sleep()
def spawn_greenlets(self):
for greenlet in self.greenlets:
#if len(self.players) >= self.min_players:
if len(self.players) >= 1:
gevent.spawn(self.greenlets[greenlet])
    # Place up to max_players waiting players back into the player list and start
    # the game again, unless fewer than 2 players are available.
def winning_logic(self):
winner_name = self.players.keys()[0]
self.players[winner_name].end('winner')
def restart_game(self, countdown=15):
countdown = countdown
while countdown > 0:
self.broadcast(
'next game will start in {} seconds'.format(countdown))
gevent.sleep(1)
countdown -= 1
self.finished = False
self.players = {}
if len(self.waiting_players) > 0:
for player in self.waiting_players:
self.players[player.name] = player
if len(self.players) >= self.max_players:
break
self.broadcast('waiting for players')
class Player(object):
def __init__(self, game, name, avatar, fd, x, y, r, color, speed=15):
self.game = game
self.name = name
self.avatar = avatar
self.fd = fd
self.arena_object = ArenaObject(x, y, r, speed)
self.attack = 0
self.energy = 100.0
self.arena = "arena{}".format(uwsgi.worker_id())
self.redis = redis.StrictRedis()
self.channel = self.redis.pubsub()
self.channel.subscribe(self.arena)
self.redis_fd = self.channel.connection._sock.fileno()
self.cmd = None
self.attack_cmd = None
self.bullet = Bullet(self.game, self)
self.color = color
    # If self.energy drops to 0 or below, trigger the kill procedure and remove the
    # player from the list; if a single player remains after the death, the winning
    # procedure is triggered (see Arena.engine_start).
def damage(self, amount, attacker):
if not self.game.started:
return
self.energy -= amount
if self.energy <= 0:
self.game.broadcast(
'{} was killed by {}'.format(self.name, attacker)
)
self.end('loser')
else:
self.update_gfx()
def end(self, status):
self.send_all('kill:{},{}'.format(status, self.name))
del self.game.players[self.name]
def send_all(self, msg):
self.redis.publish(self.arena, msg)
def update_gfx(self):
msg = "{}:{},{},{},{},{},{},{},{},{}".format(
self.name,
self.arena_object.r,
self.arena_object.x,
30,
self.arena_object.y,
self.attack,
self.energy,
self.avatar,
self.arena_object.scale,
self.color
)
self.send_all(msg)
def wait_for_game(self):
while (self.game.started or self.game.finished or
self.name not in self.game.players):
gevent.sleep(1)
try:
uwsgi.websocket_recv_nb()
except IOError:
import sys
print sys.exc_info()
                if self.name in self.game.players:
self.end('leaver')
return [""]
class Bullet(object):
def __init__(self, game, player, damage=10, speed=50, _range=1500.0):
self.game = game
self.player = player
self.arena_object = ArenaObject(
self.player.arena_object.x, self.player.arena_object.y, 0.0, speed)
self.is_shooting = 0
self._range = _range
self.damage = damage
def shoot(self):
if self.is_shooting > 0:
return
self.arena_object.x = self.player.arena_object.x
self.arena_object.y = self.player.arena_object.y
self.arena_object.r = self.player.arena_object.r
self.is_shooting = self._range
self.player.damage(1.0, 'himself')
self.game.animations.append(self)
def animate(self):
self.arena_object.translate(self.arena_object.speed)
if self.collision():
self.is_shooting = 0
else:
self.is_shooting -= self.arena_object.speed
msg = "!:{}:{},{},{},{},{}".format(
self.player.name,
self.arena_object.r,
self.arena_object.x,
50,
self.arena_object.y,
self.is_shooting
)
self.player.send_all(msg)
if self.is_shooting <= 0:
self.game.animations.remove(self)
def collision(self):
for p in self.game.players.keys():
if self.game.players[p] == self.player:
continue
if self.arena_object.collide(
self.game.players[p].arena_object.x,
self.game.players[p].arena_object.y,
self.game.players[p].arena_object.width * self.game.players[p].arena_object.scale,
self.game.players[p].arena_object.height * self.game.players[p].arena_object.scale,
):
self.game.players[p].damage(self.damage, self.player.name)
return True
for wall in self.game.walls:
if wall[6] == 0:
height = 1 * wall[2]
width = 19.5 * wall[0]
else:
height = 19.5 * wall[0]
width = 1 * wall[2]
if self.arena_object.collide(wall[3], wall[5], width, height):
return True
return False
class Robotab(Arena):
def __call__(self, e, sr):
if e['PATH_INFO'] == '/':
sr('200 OK', [('Content-Type', 'text/html')])
return [open('robotab_ws.html').read()]
if e['PATH_INFO'] == '/robotab.js':
sr('200 OK', [('Content-Type', 'application/javascript')])
return [open('static/js/robotab.js').read()]
if e['PATH_INFO'] == '/robotab':
uwsgi.websocket_handshake()
username, avatar = uwsgi.websocket_recv().split(':')
try:
robot_coordinates = self.spawn_iterator.next()
except StopIteration:
self.spawn_iterator = iter(self.spawn_points)
robot_coordinates = self.spawn_iterator.next()
uwsgi.websocket_send('posters:{}'.format(';'.join(self.posters)))
for wall in self.walls:
uwsgi.websocket_send(
'wall:{},{},{},{},{},{},{}'.format(*wall))
player = Player(self, username, avatar,
uwsgi.connection_fd(), *robot_coordinates)
if(self.started or self.finished or
len(self.players) > self.max_players or
len(self.waiting_players) > 0):
print('{}:{}:{}:{}'.format(
self.started, self.finished,
len(self.players) > self.max_players,
len(self.waiting_players) > 0))
self.waiting_players.append(player)
uwsgi.websocket_send(
"arena:hey {}, wait for next game".format(player.name))
player.wait_for_game()
self.waiting_players.remove(player)
else:
self.players[player.name] = player
self.spawn_greenlets()
for p in self.players.keys():
self.players[p].update_gfx()
while True:
ready = gevent.select.select(
[player.fd, player.redis_fd], [], [], timeout=4.0)
if not ready[0]:
uwsgi.websocket_recv_nb()
for fd in ready[0]:
if fd == player.fd:
try:
msg = uwsgi.websocket_recv_nb()
except IOError:
import sys
print sys.exc_info()
if player.name in self.players:
player.end('leaver')
return [""]
if msg and not self.finished:
self.msg_handler(player, msg)
elif fd == player.redis_fd:
msg = player.channel.parse_response()
if msg[0] == 'message':
uwsgi.websocket_send(msg[2])
application = Robotab()
|
py
|
1a571960bc7e5cd8b89b990fd593dfd28df03b65
|
"""
Component for the Goalfeed service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/goalfeed/
"""
import json
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
REQUIREMENTS = ['pysher==1.0.4']
DOMAIN = 'goalfeed'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
}, extra=vol.ALLOW_EXTRA)
GOALFEED_HOST = 'feed.goalfeed.ca'
GOALFEED_AUTH_ENDPOINT = 'https://goalfeed.ca/feed/auth'
GOALFEED_APP_ID = 'bfd4ed98c1ff22c04074'
def setup(hass, config):
"""Set up the Goalfeed component."""
import pysher
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
def goal_handler(data):
"""Handle goal events."""
goal = json.loads(json.loads(data))
hass.bus.fire('goal', event_data=goal)
def connect_handler(data):
"""Handle connection."""
post_data = {
'username': username,
'password': password,
'connection_info': data}
resp = requests.post(GOALFEED_AUTH_ENDPOINT, post_data,
timeout=30).json()
channel = pusher.subscribe('private-goals', resp['auth'])
channel.bind('goal', goal_handler)
pusher = pysher.Pusher(GOALFEED_APP_ID, secure=False, port=8080,
custom_host=GOALFEED_HOST)
pusher.connection.bind('pusher:connection_established', connect_handler)
pusher.connect()
return True
|
py
|
1a5719f1ad65d6a14e437231abfc57167de1bddc
|
import os
from celery.schedules import crontab
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# Set in local_settings.py
SECRET_KEY = 'SECRET_SECRET_SECRET'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv('DEBUG_STATUS', True)
ALLOWED_HOSTS = ['*']
# Application definition
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'simple_pagination',
'compressor',
'common',
'accounts',
'cases',
'contacts',
'emails',
'leads',
'opportunity',
'planner',
'sorl.thumbnail',
'phonenumber_field',
'storages',
'marketing',
'tasks',
'invoices',
'events',
'teams',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'crm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates"), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'crm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'dj_crm',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': os.getenv('DB_HOST', '127.0.0.1'),
'PORT': os.getenv('DB_PORT', '5432')
}
}
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static"), ]
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# EMAIL_HOST = 'localhost'
# EMAIL_PORT = 25
# AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend', )
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = os.getenv('SG_USER', '')
EMAIL_HOST_PASSWORD = os.getenv('SG_PWD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
AUTH_USER_MODEL = 'common.User'
STORAGE_TYPE = os.getenv('STORAGE_TYPE', 'normal')
if STORAGE_TYPE == 'normal':
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = (BASE_DIR + '/static',)
COMPRESS_ROOT = BASE_DIR + '/static/'
elif STORAGE_TYPE == 's3-storage':
AWS_STORAGE_BUCKET_NAME = AWS_BUCKET_NAME = os.getenv('AWSBUCKETNAME', '')
AM_ACCESS_KEY = AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', '')
AM_PASS_KEY = AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', '')
S3_DOMAIN = AWS_S3_CUSTOM_DOMAIN = str(AWS_BUCKET_NAME) + '.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
DEFAULT_S3_PATH = "media"
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_S3_PATH = "static"
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_JS_FILTERS = ['compressor.filters.jsmin.JSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
MEDIA_ROOT = '/%s/' % DEFAULT_S3_PATH
MEDIA_URL = '//%s/%s/' % (S3_DOMAIN, DEFAULT_S3_PATH)
STATIC_ROOT = "/%s/" % STATIC_S3_PATH
STATIC_URL = 'https://%s/' % (S3_DOMAIN)
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
CORS_ORIGIN_ALLOW_ALL = True
AWS_IS_GZIPPED = True
AWS_ENABLED = True
AWS_S3_SECURE_URLS = True
COMPRESS_ROOT = BASE_DIR + '/static/'
COMPRESS_ENABLED = True
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': 'STATIC_URL',
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.CSSMinFilter']
COMPRESS_REBUILD_TIMEOUT = 5400
COMPRESS_OUTPUT_DIR = 'CACHE'
COMPRESS_URL = STATIC_URL
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile}'),
('text/x-sass', 'sass {infile} {outfile}'),
('text/x-scss', 'sass {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
'STATIC_URL': 'STATIC_URL',
}
DEFAULT_FROM_EMAIL = '[email protected]'
# celery Tasks
CELERY_BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_BEAT_SCHEDULE = {
"runs-campaign-for-every-thiry-minutes": {
"task": "marketing.tasks.run_all_campaigns",
"schedule": crontab(minute=30, hour='*')
},
"runs-campaign-for-every-five-minutes": {
"task": "marketing.tasks.list_all_bounces_unsubscribes",
"schedule": crontab(minute='*/5')
},
"runs-scheduled-campaigns-for-every-one-hour": {
"task": "marketing.tasks.send_scheduled_campaigns",
"schedule": crontab(hour='*/1')
},
"runs-scheduled-emails-for-accounts-every-five-minutes": {
"task": "accounts.tasks.send_scheduled_emails",
"schedule": crontab(minute='*/1')
}
}
MAIL_SENDER = 'AMAZON'
INACTIVE_MAIL_SENDER = 'MANDRILL'
AM_ACCESS_KEY = os.getenv('AM_ACCESS_KEY', '')
AM_PASS_KEY = os.getenv('AM_PASS_KEY', '')
AWS_REGION = os.getenv('AWS_REGION', '')
MGUN_API_URL = os.getenv('MGUN_API_URL', '')
MGUN_API_KEY = os.getenv('MGUN_API_KEY', '')
SG_USER = os.getenv('SG_USER', '')
SG_PWD = os.getenv('SG_PWD', '')
MANDRILL_API_KEY = os.getenv('MANDRILL_API_KEY', '')
ADMIN_EMAIL = "[email protected]"
URL_FOR_LINKS = "http://demo.django-crm.io"
try:
from .dev_settings import *
except ImportError:
pass
GP_CLIENT_ID = os.getenv('GP_CLIENT_ID', False)
GP_CLIENT_SECRET = os.getenv('GP_CLIENT_SECRET', False)
ENABLE_GOOGLE_LOGIN = os.getenv('ENABLE_GOOGLE_LOGIN', False)
MARKETING_REPLY_EMAIL = '[email protected]'
PASSWORD_RESET_TIMEOUT_DAYS = 3
SENTRY_ENABLED = os.getenv('SENTRY_ENABLED', False)
if SENTRY_ENABLED and not DEBUG:
if os.getenv('SENTRYDSN') is not None:
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRYDSN', ''),
}
INSTALLED_APPS = INSTALLED_APPS + [
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',
] + MIDDLEWARE
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
# Load the local settings file if it exists
if os.path.isfile('crm/local_settings.py'):
from .local_settings import *
else:
print("No local settings file found")
|
py
|
1a571a7ab6e81538e3ab9b48c937ca39c4626f3f
|
#!/usr/bin/env python3
import os
import stat
def getFileList(fileDir):
fileList = os.listdir(fileDir)
newFileList = []
    for eachFile in fileList:
        # de-duplicate by the first 31 characters of the file name
        if eachFile[:31] not in (f[:31] for f in newFileList):
            newFileList.append(eachFile)
return newFileList
def createSubmitFile(eventRootName):
fileName = str(eventRootName).split("_")[4]
submitComtent = """Universe = vanilla
Notification = Error
Initialdir = /star/u/jhai/selfSC_GL/test/submit/submit6.8_9
Executable = $(Initialdir)/run47.csh
Arguments = $(Process)
Log = $(Initialdir)/log/job47_$(Process).log
Output = $(Initialdir)/log/job47_$(Process).out
Error = $(Initialdir)/log/job47_$(Process).err
GetEnv = True
+Job_Type = "cas"
Queue 1"""
submitComtentResult = submitComtent.split("\n")
fileObject = open(fileName + ".con", "w")
for eachLine in submitComtentResult:
if eachLine.lower().startswith("executable"):
eachLine = "Executable = $(Initialdir)/" + fileName + ".csh"
fileObject.write(eachLine+'\n')
fileObject.close()
def createRunShell(eventRootName):
fileName = str(eventRootName).split("_")[4]
shellContent = """#!/bin/csh
stardev
root4star -b -q -l 'doEvents_SCGL_Calib.C(5000,"./output/*19072018_raw_5000008*")'
"""
shellContentResult = shellContent.split("\n")
fileObject = open(fileName + ".csh", "w")
for eachLine in shellContentResult:
eachLine.strip('\n')
if eachLine.lower().startswith("root4star"):
eachLine = """root4star -b -q -l 'doEvents_SCGL_Calib.C(5000,"/star/u/jhai/scratch/test/""" + eventRootName + "*" + """")'"""
fileObject.write(eachLine+"\n")
fileObject.close()
os.chmod(fileName + ".csh", stat.S_IRWXU)
def main():
fileDir = "/star/u/jhai/scratch/test"
eventFileList = getFileList(fileDir)
for eachEventFile in eventFileList:
eachEventFile = eachEventFile[:31]
createRunShell(eachEventFile)
createSubmitFile(eachEventFile)
if __name__ == "__main__":
main()
|
py
|
1a571b79b400fb6ac1f57ab3ca453c1793c23d4a
|
import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
test_type = "lens__source_inversion"
test_name = "lens_both__source_rectangular"
data_type = "lens_light__source_smooth"
data_resolution = "lsst"
def make_pipeline(name, phase_folders, optimizer_class=af.MultiNest):
class SourcePix(al.PhaseImaging):
def customize_priors(self, results):
self.galaxies.lens.mass.centre.centre_0 = 0.0
self.galaxies.lens.mass.centre.centre_1 = 0.0
self.galaxies.lens.mass.einstein_radius = 1.6
self.galaxies.source.pixelization.shape_0 = 20.0
self.galaxies.source.pixelization.shape_1 = 20.0
phase1 = SourcePix(
phase_name="phase_1",
phase_folders=phase_folders,
galaxies=dict(
lens=al.GalaxyModel(
redshift=0.5,
light=al.lp.SphericalDevVaucouleurs,
mass=al.mp.EllipticalIsothermal,
),
source=al.GalaxyModel(
redshift=1.0,
pixelization=al.pix.Rectangular,
regularization=al.reg.Constant,
),
),
optimizer_class=optimizer_class,
)
phase1.optimizer.const_efficiency_mode = True
phase1.optimizer.n_live_points = 60
phase1.optimizer.sampling_efficiency = 0.8
return al.PipelineDataset(name, phase1)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
py
|
1a571c6a462bca4d58c8d936f7458d8c58e5c7a3
|
import csv
import json
import os
import re
from configparser import ConfigParser, NoOptionError, NoSectionError
from itertools import chain, combinations, permutations
from typing import Dict, Iterable, Iterator, List
from decorator import decorator
from logger import logger
from perfrunner.helpers.misc import maybe_atoi, target_hash
CBMONITOR_HOST = 'cbmonitor.sc.couchbase.com'
SHOWFAST_HOST = 'showfast.sc.couchbase.com' # 'localhost:8000'
REPO = 'https://github.com/couchbase/perfrunner'
@decorator
def safe(method, *args, **kwargs):
try:
return method(*args, **kwargs)
except (NoSectionError, NoOptionError) as e:
logger.warn('Failed to get option from config: {}'.format(e))
class Config:
def __init__(self):
self.config = ConfigParser()
self.name = ''
def parse(self, fname: str, override=None):
logger.info('Reading configuration file: {}'.format(fname))
if not os.path.isfile(fname):
logger.interrupt("File doesn't exist: {}".format(fname))
self.config.optionxform = str
self.config.read(fname)
basename = os.path.basename(fname)
self.name = os.path.splitext(basename)[0]
if override is not None:
self.override(override)
def override(self, override: List[str]):
override = [x for x in csv.reader(override, delimiter='.')]
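        # Each override string is expected in "<section>.<option>.<value>" form;
        # csv.reader with a '.' delimiter splits it into the three fields
        # consumed below.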
for section, option, value in override:
if not self.config.has_section(section):
self.config.add_section(section)
self.config.set(section, option, value)
@safe
def _get_options_as_dict(self, section: str) -> dict:
if section in self.config.sections():
return {p: v for p, v in self.config.items(section)}
else:
return {}
class ClusterSpec(Config):
@property
def dynamic_infrastructure(self):
if 'infrastructure' in self.config.sections():
return True
else:
return False
@property
def generated_cloud_config_path(self):
if self.dynamic_infrastructure:
return "cloud/infrastructure/generated/infrastructure_config.json"
else:
return None
@property
def infrastructure_settings(self):
return {k: v for k, v in self.config.items('infrastructure')}
@property
def infrastructure_clusters(self):
return {k: v for k, v in self.config.items('clusters')}
@property
def infrastructure_clients(self):
return {k: v for k, v in self.config.items('clients')}
@property
def infrastructure_utilities(self):
return {k: v for k, v in self.config.items('utilities')}
def kubernetes_version(self, cluster_name):
return self.infrastructure_section(cluster_name)\
.get('version', '1.17')
def istio_enabled(self, cluster_name):
istio_enabled = self.infrastructure_section(cluster_name).get('istio_enabled', 0)
istio_enabled = bool(int(istio_enabled))
return istio_enabled
def kubernetes_storage_class(self, cluster_name):
return self.infrastructure_section(cluster_name) \
.get('storage_class', 'default')
def kubernetes_clusters(self):
k8s_clusters = []
if 'k8s' in self.config.sections():
for k, v in self.config.items('k8s'):
k8s_clusters += v.split(",")
return k8s_clusters
def infrastructure_section(self, section: str):
if section in self.config.sections():
return {k: v for k, v in self.config.items(section)}
else:
return {}
def infrastructure_config(self):
infra_config = {}
for section in self.config.sections():
infra_config[section] = {p: v for p, v in self.config.items(section)}
return infra_config
@property
def clusters(self) -> Iterator:
for cluster_name, servers in self.config.items('clusters'):
hosts = [s.split(':')[0] for s in servers.split()]
yield cluster_name, hosts
@property
def masters(self) -> Iterator[str]:
for _, servers in self.clusters:
yield servers[0]
@property
def servers(self) -> List[str]:
servers = []
for _, cluster_servers in self.clusters:
for server in cluster_servers:
servers.append(server)
return servers
def servers_by_role(self, role: str) -> List[str]:
has_service = []
for _, servers in self.config.items('clusters'):
for server in servers.split():
host, roles = server.split(':')
if role in roles:
has_service.append(host)
return has_service
def servers_by_role_from_first_cluster(self, role: str) -> List[str]:
has_service = []
servers = self.config.items('clusters')[0][1]
for server in servers.split():
host, roles = server.split(':')
if role in roles:
has_service.append(host)
return has_service
@property
def roles(self) -> Dict[str, str]:
server_roles = {}
for _, servers in self.config.items('clusters'):
for server in servers.split():
host, roles = server.split(':')
server_roles[host] = roles
return server_roles
@property
def servers_and_roles(self) -> Dict[str, str]:
server_and_roles = []
for _, servers in self.config.items('clusters'):
for server in servers.split():
host, roles = server.split(':')
server_and_roles.append((host, roles))
return server_and_roles
@property
def workers(self) -> List[str]:
if self.dynamic_infrastructure:
client_map = self.infrastructure_clients
clients = []
for k, v in client_map.items():
if "workers" in k:
clients += ["{}.{}".format(k, host) for host in v.split()]
return clients
else:
return self.config.get('clients', 'hosts').split()
@property
def client_credentials(self) -> List[str]:
return self.config.get('clients', 'credentials').split(':')
@property
def data_path(self) -> str:
return self.config.get('storage', 'data')
@property
def index_path(self) -> str:
return self.config.get('storage', 'index',
fallback=self.config.get('storage', 'data'))
@property
def analytics_paths(self) -> List[str]:
analytics_paths = self.config.get('storage', 'analytics', fallback=None)
if analytics_paths is not None:
return analytics_paths.split()
return []
@property
def paths(self) -> Iterator[str]:
for path in set([self.data_path, self.index_path] + self.analytics_paths):
if path is not None:
yield path
@property
def backup(self) -> str:
return self.config.get('storage', 'backup', fallback=None)
@property
def rest_credentials(self) -> List[str]:
return self.config.get('credentials', 'rest').split(':')
@property
def ssh_credentials(self) -> List[str]:
return self.config.get('credentials', 'ssh').split(':')
@property
def aws_key_name(self) -> List[str]:
return self.config.get('credentials', 'aws_key_name')
@property
def parameters(self) -> dict:
return self._get_options_as_dict('parameters')
class TestCaseSettings:
USE_WORKERS = 1
RESET_WORKERS = 0
def __init__(self, options: dict):
self.test_module = '.'.join(options.get('test').split('.')[:-1])
self.test_class = options.get('test').split('.')[-1]
self.use_workers = int(options.get('use_workers', self.USE_WORKERS))
self.reset_workers = int(options.get('reset_workers', self.RESET_WORKERS))
class ShowFastSettings:
THRESHOLD = -10
def __init__(self, options: dict):
self.title = options.get('title')
self.component = options.get('component', '')
self.category = options.get('category', '')
self.sub_category = options.get('sub_category', '')
self.order_by = options.get('orderby', '')
self.build_label = options.get('build_label', '')
self.threshold = int(options.get("threshold", self.THRESHOLD))
class ClusterSettings:
NUM_BUCKETS = 1
INDEX_MEM_QUOTA = 256
FTS_INDEX_MEM_QUOTA = 0
ANALYTICS_MEM_QUOTA = 0
EVENTING_MEM_QUOTA = 0
EVENTING_BUCKET_MEM_QUOTA = 0
EVENTING_METADATA_BUCKET_MEM_QUOTA = 0
EVENTING_METADATA_BUCKET_NAME = 'eventing'
EVENTING_BUCKETS = 0
KERNEL_MEM_LIMIT = 0
KERNEL_MEM_LIMIT_SERVICES = 'fts', 'index'
ONLINE_CORES = 0
ENABLE_CPU_CORES = 'true'
ENABLE_N2N_ENCRYPTION = None
BUCKET_NAME = 'bucket-1'
IPv6 = 0
def __init__(self, options: dict):
self.mem_quota = int(options.get('mem_quota'))
self.index_mem_quota = int(options.get('index_mem_quota',
self.INDEX_MEM_QUOTA))
self.fts_index_mem_quota = int(options.get('fts_index_mem_quota',
self.FTS_INDEX_MEM_QUOTA))
self.analytics_mem_quota = int(options.get('analytics_mem_quota',
self.ANALYTICS_MEM_QUOTA))
self.eventing_mem_quota = int(options.get('eventing_mem_quota',
self.EVENTING_MEM_QUOTA))
self.initial_nodes = [
int(nodes) for nodes in options.get('initial_nodes').split()
]
self.num_buckets = int(options.get('num_buckets',
self.NUM_BUCKETS))
self.eventing_bucket_mem_quota = int(options.get('eventing_bucket_mem_quota',
self.EVENTING_BUCKET_MEM_QUOTA))
self.eventing_metadata_bucket_mem_quota = \
int(options.get('eventing_metadata_bucket_mem_quota',
self.EVENTING_METADATA_BUCKET_MEM_QUOTA))
self.eventing_buckets = int(options.get('eventing_buckets',
self.EVENTING_BUCKETS))
self.num_vbuckets = options.get('num_vbuckets')
self.online_cores = int(options.get('online_cores',
self.ONLINE_CORES))
self.enable_cpu_cores = maybe_atoi(options.get('enable_cpu_cores', self.ENABLE_CPU_CORES))
self.ipv6 = int(options.get('ipv6', self.IPv6))
self.kernel_mem_limit = options.get('kernel_mem_limit',
self.KERNEL_MEM_LIMIT)
self.enable_n2n_encryption = options.get('enable_n2n_encryption',
self.ENABLE_N2N_ENCRYPTION)
kernel_mem_limit_services = options.get('kernel_mem_limit_services')
if kernel_mem_limit_services:
self.kernel_mem_limit_services = kernel_mem_limit_services.split()
else:
self.kernel_mem_limit_services = self.KERNEL_MEM_LIMIT_SERVICES
self.bucket_name = options.get('bucket_name', self.BUCKET_NAME)
class StatsSettings:
ENABLED = 1
POST_TO_SF = 0
INTERVAL = 5
LAT_INTERVAL = 1
POST_CPU = 0
CLIENT_PROCESSES = []
SERVER_PROCESSES = ['beam.smp',
'cbft',
'cbq-engine',
'indexer',
'memcached']
TRACED_PROCESSES = []
def __init__(self, options: dict):
self.enabled = int(options.get('enabled', self.ENABLED))
self.post_to_sf = int(options.get('post_to_sf', self.POST_TO_SF))
self.interval = int(options.get('interval', self.INTERVAL))
self.lat_interval = float(options.get('lat_interval',
self.LAT_INTERVAL))
self.post_cpu = int(options.get('post_cpu', self.POST_CPU))
self.client_processes = self.CLIENT_PROCESSES + \
options.get('client_processes', '').split()
self.server_processes = self.SERVER_PROCESSES + \
options.get('server_processes', '').split()
self.traced_processes = self.TRACED_PROCESSES + \
options.get('traced_processes', '').split()
class ProfilingSettings:
INTERVAL = 300 # 5 minutes
NUM_PROFILES = 1
PROFILES = 'cpu'
SERVICES = ''
LINUX_PERF_PROFILE_DURATION = 10 # seconds
LINUX_PERF_FREQUENCY = 99
LINUX_PERF_CALLGRAPH = 'lbr' # options: lbr, dwarf
LINUX_PERF_DELAY_MULTIPLIER = 2
def __init__(self, options: dict):
self.services = options.get('services',
self.SERVICES).split()
self.interval = int(options.get('interval',
self.INTERVAL))
self.num_profiles = int(options.get('num_profiles',
self.NUM_PROFILES))
self.profiles = options.get('profiles',
self.PROFILES).split(',')
self.linux_perf_profile_duration = int(options.get('linux_perf_profile_duration',
self.LINUX_PERF_PROFILE_DURATION))
self.linux_perf_profile_flag = bool(options.get('linux_perf_profile_flag'))
self.linux_perf_frequency = int(options.get('linux_perf_frequency',
self.LINUX_PERF_FREQUENCY))
self.linux_perf_callgraph = options.get('linux_perf_callgraph',
self.LINUX_PERF_CALLGRAPH)
self.linux_perf_delay_multiplier = int(options.get('linux_perf_delay_multiplier',
self.LINUX_PERF_DELAY_MULTIPLIER))
class BucketSettings:
PASSWORD = 'password'
REPLICA_NUMBER = 1
REPLICA_INDEX = 0
EVICTION_POLICY = 'valueOnly' # alt: fullEviction
BUCKET_TYPE = 'membase' # alt: ephemeral
AUTOFAILOVER_ENABLED = 'true'
FAILOVER_MIN = 5
FAILOVER_MAX = 30
BACKEND_STORAGE = None
def __init__(self, options: dict):
self.password = options.get('password', self.PASSWORD)
self.replica_number = int(
options.get('replica_number', self.REPLICA_NUMBER)
)
self.replica_index = int(
options.get('replica_index', self.REPLICA_INDEX)
)
self.eviction_policy = options.get('eviction_policy',
self.EVICTION_POLICY)
self.bucket_type = options.get('bucket_type',
self.BUCKET_TYPE)
self.conflict_resolution_type = options.get('conflict_resolution_type')
self.compression_mode = options.get('compression_mode')
if options.get('autofailover_enabled', self.AUTOFAILOVER_ENABLED).lower() == "false":
self.autofailover_enabled = 'false'
else:
self.autofailover_enabled = 'true'
self.failover_min = int(options.get('failover_min', self.FAILOVER_MIN))
self.failover_max = int(options.get('failover_max', self.FAILOVER_MAX))
self.backend_storage = options.get('backend_storage', self.BACKEND_STORAGE)
class CollectionSettings:
CONFIG = None
COLLECTION_MAP = None
USE_BULK_API = 1
def __init__(self, options: dict):
self.config = options.get('config', self.CONFIG)
self.collection_map = self.COLLECTION_MAP
self.use_bulk_api = int(options.get('use_bulk_api', self.USE_BULK_API))
if self.config is not None:
with open(self.config) as f:
self.collection_map = json.load(f)
class UserSettings:
NUM_USERS_PER_BUCKET = 0
def __init__(self, options: dict):
self.num_users_per_bucket = int(options.get('num_users_per_bucket',
self.NUM_USERS_PER_BUCKET))
class CompactionSettings:
DB_PERCENTAGE = 30
VIEW_PERCENTAGE = 30
PARALLEL = True
BUCKET_COMPACTION = 'true'
def __init__(self, options: dict):
self.db_percentage = options.get('db_percentage',
self.DB_PERCENTAGE)
self.view_percentage = options.get('view_percentage',
self.VIEW_PERCENTAGE)
self.parallel = options.get('parallel', self.PARALLEL)
self.bucket_compaction = options.get('bucket_compaction', self.BUCKET_COMPACTION)
def __str__(self):
return str(self.__dict__)
class RebalanceSettings:
SWAP = 0
FAILOVER = 'hard' # Alt: graceful
DELTA_RECOVERY = 0 # Full recovery by default
DELAY_BEFORE_FAILOVER = 600
START_AFTER = 1200
STOP_AFTER = 1200
FTS_PARTITIONS = "1"
FTS_MAX_DCP_PARTITIONS = "0"
def __init__(self, options: dict):
nodes_after = options.get('nodes_after', '').split()
self.nodes_after = [int(num_nodes) for num_nodes in nodes_after]
self.swap = int(options.get('swap', self.SWAP))
self.failed_nodes = int(options.get('failed_nodes', 1))
self.failover = options.get('failover', self.FAILOVER)
self.delay_before_failover = int(options.get('delay_before_failover',
self.DELAY_BEFORE_FAILOVER))
self.delta_recovery = int(options.get('delta_recovery',
self.DELTA_RECOVERY))
self.start_after = int(options.get('start_after', self.START_AFTER))
self.stop_after = int(options.get('stop_after', self.STOP_AFTER))
# The rebalance settings for FTS
self.ftspartitions = options.get('ftspartitions', self.FTS_PARTITIONS)
self.fts_max_dcp_partitions = options.get('fts_max_dcp_partitions',
self.FTS_MAX_DCP_PARTITIONS)
self.fts_node_level_parameters = {}
if self.ftspartitions != self.FTS_PARTITIONS:
self.fts_node_level_parameters["maxConcurrentPartitionMovesPerNode"] = \
self.ftspartitions
if self.fts_max_dcp_partitions != self.FTS_MAX_DCP_PARTITIONS:
self.fts_node_level_parameters["maxFeedsPerDCPAgent"] = self.fts_max_dcp_partitions
class PhaseSettings:
TIME = 3600 * 24
DOC_GEN = 'basic'
POWER_ALPHA = 0
ZIPF_ALPHA = 0
KEY_PREFIX = None
CREATES = 0
READS = 0
UPDATES = 0
DELETES = 0
READS_AND_UPDATES = 0
FTS_UPDATES = 0
TTL = 0
OPS = 0
TARGET = 0
HOT_READS = False
SEQ_UPSERTS = False
BATCH_SIZE = 1000
BATCHES = 1
SPRING_BATCH_SIZE = 100
ITERATIONS = 1
ASYNC = False
KEY_FMTR = 'decimal'
ITEMS = 0
SIZE = 2048
PHASE = 0
INSERT_TEST_FLAG = 0
MEM_LOW_WAT = 0
MEM_HIGH_WAT = 0
WORKING_SET = 100
WORKING_SET_ACCESS = 100
WORKING_SET_MOVE_TIME = 0
WORKING_SET_MOVE_DOCS = 0
THROUGHPUT = float('inf')
QUERY_THROUGHPUT = float('inf')
N1QL_THROUGHPUT = float('inf')
VIEW_QUERY_PARAMS = '{}'
WORKERS = 0
QUERY_WORKERS = 0
N1QL_WORKERS = 0
FTS_DATA_SPREAD_WORKERS = None
FTS_DATA_SPREAD_WORKER_TYPE = "default"
WORKLOAD_INSTANCES = 1
N1QL_OP = 'read'
N1QL_BATCH_SIZE = 100
N1QL_TIMEOUT = 0
ARRAY_SIZE = 10
NUM_CATEGORIES = 10 ** 6
NUM_REPLIES = 100
RANGE_DISTANCE = 10
ITEM_SIZE = 64
SIZE_VARIATION_MIN = 1
SIZE_VARIATION_MAX = 1024
RECORDED_LOAD_CACHE_SIZE = 0
INSERTS_PER_WORKERINSTANCE = 0
RUN_EXTRA_ACCESS = 'false'
EPOLL = 'true'
BOOST = 48
YCSB_FIELD_COUNT = 10
YCSB_FIELD_LENGTH = 100
YCSB_INSERTSTART = 0
SSL_MODE = 'none'
SSL_AUTH_KEYSTORE = "certificates/auth.keystore"
SSL_DATA_KEYSTORE = "certificates/data.keystore"
SSL_KEYSTOREPASS = "storepass"
CERTIFICATE_FILE = "root.pem"
SHOW_TLS_VERSION = False
CIPHER_LIST = None
MIN_TLS_VERSION = None
PERSIST_TO = 0
REPLICATE_TO = 0
TIMESERIES = 0
CBCOLLECT = 0
CONNSTR_PARAMS = "{'ipv6': 'allow', 'enable_tracing': 'false'}"
YCSB_CLIENT = 'couchbase2'
DURABILITY = None
YCSB_KV_ENDPOINTS = 1
YCSB_ENABLE_MUTATION_TOKEN = None
YCSB_RETRY_STRATEGY = 'default'
YCSB_RETRY_LOWER = 1
YCSB_RETRY_UPPER = 500
YCSB_RETRY_FACTOR = 2
YCSB_OUT_OF_ORDER = 0
YCSB_SPLIT_WORKLOAD = 0
TRANSACTIONSENABLED = 0
NUM_ATRS = 1024
YCSB_JVM_ARGS = None
TPCDS_SCALE_FACTOR = 1
DOCUMENTSINTRANSACTION = 4
TRANSACTIONREADPROPORTION = 0.25
TRANSACTIONUPDATEPROPORTION = 0.75
TRANSACTIONINSERTPROPORTION = 0
REQUESTDISTRIBUTION = 'zipfian'
ANALYTICS_WARMUP_OPS = 0
ANALYTICS_WARMUP_WORKERS = 0
COLLECTION_MAP = None
CUSTOM_PILLOWFIGHT = False
USERS = None
USER_MOD_THROUGHPUT = float('inf')
USER_MOD_WORKERS = 0
COLLECTION_MOD_WORKERS = 0
COLLECTION_MOD_THROUGHPUT = float('inf')
JAVA_DCP_STREAM = 'all'
JAVA_DCP_CONFIG = None
JAVA_DCP_CLIENTS = 0
SPLIT_WORKLOAD = None
SPLIT_WORKLOAD_THROUGHPUT = 0
SPLIT_WORKLOAD_WORKERS = 0
DOCUMENT_GROUPS = 1
def __init__(self, options: dict):
# Common settings
self.time = int(options.get('time', self.TIME))
# KV settings
self.doc_gen = options.get('doc_gen', self.DOC_GEN)
self.power_alpha = float(options.get('power_alpha', self.POWER_ALPHA))
self.zipf_alpha = float(options.get('zipf_alpha', self.ZIPF_ALPHA))
self.key_prefix = options.get('key_prefix', self.KEY_PREFIX)
self.size = int(options.get('size', self.SIZE))
self.items = int(options.get('items', self.ITEMS))
self.phase = int(options.get('phase', self.PHASE))
self.insert_test_flag = int(options.get('insert_test_flag', self.INSERT_TEST_FLAG))
self.mem_low_wat = int(options.get('mem_low_wat', self.MEM_LOW_WAT))
self.mem_high_wat = int(options.get('mem_high_wat', self.MEM_HIGH_WAT))
self.creates = int(options.get('creates', self.CREATES))
self.reads = int(options.get('reads', self.READS))
self.updates = int(options.get('updates', self.UPDATES))
self.deletes = int(options.get('deletes', self.DELETES))
self.ttl = int(options.get('ttl', self.TTL))
self.reads_and_updates = int(options.get('reads_and_updates',
self.READS_AND_UPDATES))
self.fts_updates_swap = int(options.get('fts_updates_swap',
self.FTS_UPDATES))
self.fts_updates_reverse = int(options.get('fts_updates_reverse',
self.FTS_UPDATES))
self.ops = float(options.get('ops', self.OPS))
self.throughput = float(options.get('throughput', self.THROUGHPUT))
self.working_set = float(options.get('working_set', self.WORKING_SET))
self.working_set_access = int(options.get('working_set_access',
self.WORKING_SET_ACCESS))
self.working_set_move_time = int(options.get('working_set_move_time',
self.WORKING_SET_MOVE_TIME))
self.working_set_moving_docs = int(options.get('working_set_moving_docs',
self.WORKING_SET_MOVE_DOCS))
self.workers = int(options.get('workers', self.WORKERS))
# 'async' is a reserved keyword in Python 3.7+, so the attribute is named async_
self.async_ = bool(int(options.get('async', self.ASYNC)))
self.key_fmtr = options.get('key_fmtr', self.KEY_FMTR)
self.hot_reads = self.HOT_READS
self.seq_upserts = self.SEQ_UPSERTS
self.iterations = int(options.get('iterations', self.ITERATIONS))
self.batch_size = int(options.get('batch_size', self.BATCH_SIZE))
self.batches = int(options.get('batches', self.BATCHES))
self.spring_batch_size = int(options.get('spring_batch_size', self.SPRING_BATCH_SIZE))
self.workload_instances = int(options.get('workload_instances',
self.WORKLOAD_INSTANCES))
self.connstr_params = eval(options.get('connstr_params', self.CONNSTR_PARAMS))
self.run_extra_access = maybe_atoi(options.get('run_extra_access', self.RUN_EXTRA_ACCESS))
# Views settings
self.ddocs = None
self.index_type = None
self.query_params = eval(options.get('query_params',
self.VIEW_QUERY_PARAMS))
self.query_workers = int(options.get('query_workers',
self.QUERY_WORKERS))
self.query_throughput = float(options.get('query_throughput',
self.QUERY_THROUGHPUT))
# N1QL settings
self.n1ql_gen = options.get('n1ql_gen')
self.n1ql_workers = int(options.get('n1ql_workers', self.N1QL_WORKERS))
self.n1ql_op = options.get('n1ql_op', self.N1QL_OP)
self.n1ql_throughput = float(options.get('n1ql_throughput',
self.N1QL_THROUGHPUT))
self.n1ql_batch_size = int(options.get('n1ql_batch_size',
self.N1QL_BATCH_SIZE))
self.array_size = int(options.get('array_size', self.ARRAY_SIZE))
self.num_categories = int(options.get('num_categories',
self.NUM_CATEGORIES))
self.num_replies = int(options.get('num_replies', self.NUM_REPLIES))
self.range_distance = int(options.get('range_distance',
self.RANGE_DISTANCE))
self.n1ql_timeout = int(options.get('n1ql_timeout', self.N1QL_TIMEOUT))
if 'n1ql_queries' in options:
self.n1ql_queries = options.get('n1ql_queries').strip().split(',')
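# The names listed in 'n1ql_queries' are assumed to refer to separate config
# sections, e.g. n1ql_queries = q1,q2 -> [n1ql-q1], [n1ql-q2]; they are
# resolved later via TestConfig.get_n1ql_query_definition.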
# 2i settings
self.item_size = int(options.get('item_size', self.ITEM_SIZE))
self.size_variation_min = int(options.get('size_variation_min',
self.SIZE_VARIATION_MIN))
self.size_variation_max = int(options.get('size_variation_max',
self.SIZE_VARIATION_MAX))
# YCSB settings
self.workload_path = options.get('workload_path')
self.recorded_load_cache_size = int(options.get('recorded_load_cache_size',
self.RECORDED_LOAD_CACHE_SIZE))
self.inserts_per_workerinstance = int(options.get('inserts_per_workerinstance',
self.INSERTS_PER_WORKERINSTANCE))
self.epoll = options.get("epoll", self.EPOLL)
self.boost = options.get('boost', self.BOOST)
self.target = float(options.get('target', self.TARGET))
self.field_count = int(options.get('field_count', self.YCSB_FIELD_COUNT))
self.field_length = int(options.get('field_length', self.YCSB_FIELD_LENGTH))
self.kv_endpoints = int(options.get('kv_endpoints', self.YCSB_KV_ENDPOINTS))
self.enable_mutation_token = options.get('enable_mutation_token',
self.YCSB_ENABLE_MUTATION_TOKEN)
self.ycsb_client = options.get('ycsb_client', self.YCSB_CLIENT)
self.ycsb_out_of_order = int(options.get('out_of_order', self.YCSB_OUT_OF_ORDER))
self.insertstart = int(options.get('insertstart', self.YCSB_INSERTSTART))
self.ycsb_split_workload = int(options.get('ycsb_split_workload', self.YCSB_SPLIT_WORKLOAD))
# transaction settings
self.transactionsenabled = int(options.get('transactionsenabled',
self.TRANSACTIONSENABLED))
self.documentsintransaction = int(options.get('documentsintransaction',
self.DOCUMENTSINTRANSACTION))
self.transactionreadproportion = options.get('transactionreadproportion',
self.TRANSACTIONREADPROPORTION)
self.transactionupdateproportion = options.get('transactionupdateproportion',
self.TRANSACTIONUPDATEPROPORTION)
self.transactioninsertproportion = options.get('transactioninsertproportion',
self.TRANSACTIONINSERTPROPORTION)
self.requestdistribution = options.get('requestdistribution',
self.REQUESTDISTRIBUTION)
# multiple of 1024
self.num_atrs = int(options.get('num_atrs', self.NUM_ATRS))
# Subdoc & XATTR
self.subdoc_field = options.get('subdoc_field')
self.xattr_field = options.get('xattr_field')
# SSL settings
self.ssl_mode = (options.get('ssl_mode', self.SSL_MODE))
self.ssl_keystore_password = self.SSL_KEYSTOREPASS
if self.ssl_mode == 'auth':
self.ssl_keystore_file = self.SSL_AUTH_KEYSTORE
else:
self.ssl_keystore_file = self.SSL_DATA_KEYSTORE
self.certificate_file = self.CERTIFICATE_FILE
self.show_tls_version = options.get('show_tls_version', self.SHOW_TLS_VERSION)
self.cipher_list = options.get('cipher_list', self.CIPHER_LIST)
if self.cipher_list:
self.cipher_list = self.cipher_list.split(',')
self.min_tls_version = options.get('min_tls_version',
self.MIN_TLS_VERSION)
# Durability settings
self.durability_set = False
if options.get('persist_to', None) or \
options.get('replicate_to', None) or \
options.get('durability', None):
self.durability_set = True
self.replicate_to = int(options.get('replicate_to', self.REPLICATE_TO))
self.persist_to = int(options.get('persist_to', self.PERSIST_TO))
if options.get('durability', self.DURABILITY) is not None:
self.durability = int(options.get('durability'))
else:
self.durability = self.DURABILITY
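# e.g. setting any of persist_to, replicate_to or durability in the test
# config marks the phase as durability-aware (durability_set above), which
# TestConfig.bucket_extras later uses to default num_writer_threads.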
# YCSB Retry Strategy settings
self.retry_strategy = options.get('retry_strategy', self.YCSB_RETRY_STRATEGY)
self.retry_lower = int(options.get('retry_lower', self.YCSB_RETRY_LOWER))
self.retry_upper = int(options.get('retry_upper', self.YCSB_RETRY_UPPER))
self.retry_factor = int(options.get('retry_factor', self.YCSB_RETRY_FACTOR))
# CbCollect Setting
self.cbcollect = int(options.get('cbcollect',
self.CBCOLLECT))
# Latency Setting
self.timeseries = int(options.get('timeseries',
self.TIMESERIES))
self.ycsb_jvm_args = options.get('ycsb_jvm_args', self.YCSB_JVM_ARGS)
self.tpcds_scale_factor = int(options.get('tpcds_scale_factor', self.TPCDS_SCALE_FACTOR))
self.analytics_warmup_ops = int(options.get('analytics_warmup_ops',
self.ANALYTICS_WARMUP_OPS))
self.analytics_warmup_workers = int(options.get('analytics_warmup_workers',
self.ANALYTICS_WARMUP_WORKERS))
# collection map placeholder
self.collections = self.COLLECTION_MAP
self.custom_pillowfight = self.CUSTOM_PILLOWFIGHT
self.users = self.USERS
self.user_mod_workers = int(options.get('user_mod_workers', self.USER_MOD_WORKERS))
self.user_mod_throughput = float(options.get('user_mod_throughput',
self.USER_MOD_THROUGHPUT))
self.collection_mod_workers = int(options.get('collection_mod_workers',
self.COLLECTION_MOD_WORKERS))
self.collection_mod_throughput = float(options.get('collection_mod_throughput',
self.COLLECTION_MOD_THROUGHPUT))
self.java_dcp_stream = self.JAVA_DCP_STREAM
self.java_dcp_config = self.JAVA_DCP_CONFIG
self.java_dcp_clients = self.JAVA_DCP_CLIENTS
self.doc_groups = int(options.get('doc_groups', self.DOCUMENT_GROUPS))
self.fts_data_spread_workers = options.get(
'fts_data_spread_workers',
self.FTS_DATA_SPREAD_WORKERS
)
if self.fts_data_spread_workers is not None:
self.fts_data_spread_workers = int(self.fts_data_spread_workers)
self.fts_data_spread_worker_type = "default"
self.split_workload = options.get('split_workload', self.SPLIT_WORKLOAD)
self.split_workload_throughput = options.get('split_workload_throughput',
self.SPLIT_WORKLOAD_THROUGHPUT)
self.split_workload_workers = options.get('split_workload_workers',
self.SPLIT_WORKLOAD_WORKERS)
def __str__(self) -> str:
return str(self.__dict__)
class LoadSettings(PhaseSettings):
CREATES = 100
SEQ_UPSERTS = True
class JTSAccessSettings(PhaseSettings):
JTS_REPO = "https://github.com/couchbaselabs/JTS"
JTS_REPO_BRANCH = "master"
JTS_HOME_DIR = "JTS"
JTS_RUN_CMD = "java -jar target/JTS-1.0-jar-with-dependencies.jar"
JTS_LOGS_DIR = "JTSlogs"
FTS_PARTITIONS = "1"
FTS_MAX_DCP_PARTITIONS = "0"
def __init__(self, options: dict):
self.jts_repo = self.JTS_REPO
self.jts_home_dir = self.JTS_HOME_DIR
self.jts_run_cmd = self.JTS_RUN_CMD
self.jts_logs_dir = self.JTS_LOGS_DIR
self.jts_repo_branch = options.get("jts_repo_branch", self.JTS_REPO_BRANCH)
self.jts_instances = options.get("jts_instances", "1")
self.test_total_docs = options.get("test_total_docs", "1000000")
self.test_query_workers = options.get("test_query_workers", "10")
self.test_kv_workers = options.get("test_kv_workers", "0")
self.test_kv_throughput_goal = options.get("test_kv_throughput_goal", "1000")
self.test_data_file = options.get("test_data_file", "../tests/fts/low.txt")
self.test_driver = options.get("test_driver", "couchbase")
self.test_stats_limit = options.get("test_stats_limit", "1000000")
self.test_stats_aggregation_step = options.get("test_stats_aggregation_step", "1000")
self.test_debug = options.get("test_debug", "false")
self.test_query_type = options.get("test_query_type", "term")
self.test_query_limit = options.get("test_query_limit", "10")
self.test_query_field = options.get("test_query_field", "text")
self.test_mutation_field = options.get("test_mutation_field", "text2")
self.test_worker_type = options.get("test_worker_type", "latency")
self.couchbase_index_name = options.get("couchbase_index_name", "perf_fts_index")
self.couchbase_index_configfile = options.get("couchbase_index_configfile")
self.couchbase_index_type = options.get("couchbase_index_type")
self.workload_instances = int(self.jts_instances)
self.time = options.get('test_duration', "600")
self.warmup_query_workers = options.get("warmup_query_workers", "0")
self.warmup_time = options.get('warmup_time', "0")
# Geo Queries parameters
self.test_geo_polygon_coord_list = options.get("test_geo_polygon_coord_list", "")
self.test_query_lon_width = options.get("test_query_lon_width", "2")
self.test_query_lat_height = options.get("test_query_lat_height", "2")
self.test_geo_distance = options.get("test_geo_distance", "5mi")
# Flex Queries parameters
self.test_flex = options.get("test_flex", 'false')
self.test_flex_query_type = options.get('test_flex_query_type', 'array_predicate')
# Collection settings
self.test_collection_query_mode = options.get('test_collection_query_mode', 'default')
# Number of indexes per index group
self.indexes_per_group = int(options.get('indexes_per_group', '1'))
# index_groups is the number of collections per index
# if index_groups is 1, all the collections are present in the index_def type mapping
self.index_groups = int(options.get('index_groups', '1'))
self.fts_index_map = {}
self.collections_enabled = False
self.test_collection_specific_count = \
int(options.get('test_collection_specific_count', '1'))
# Extra parameters for the FTS debugging
self.ftspartitions = options.get('ftspartitions', self.FTS_PARTITIONS)
self.fts_max_dcp_partitions = options.get('fts_max_dcp_partitions',
self.FTS_MAX_DCP_PARTITIONS)
self.fts_node_level_parameters = {}
if self.ftspartitions != self.FTS_PARTITIONS:
self.fts_node_level_parameters["maxConcurrentPartitionMovesPerNode"] = \
self.ftspartitions
if self.fts_max_dcp_partitions != self.FTS_MAX_DCP_PARTITIONS:
self.fts_node_level_parameters["maxFeedsPerDCPAgent"] = self.fts_max_dcp_partitions
def __str__(self) -> str:
return str(self.__dict__)
class HotLoadSettings(PhaseSettings):
HOT_READS = True
class XattrLoadSettings(PhaseSettings):
SEQ_UPSERTS = True
class RestoreSettings:
BACKUP_STORAGE = '/backups'
BACKUP_REPO = ''
IMPORT_FILE = ''
DOCS_PER_COLLECTION = 0
THREADS = 16
MAP_DATA = None
def __init__(self, options):
self.docs_per_collections = int(options.get('docs_per_collection',
self.DOCS_PER_COLLECTION))
self.backup_storage = options.get('backup_storage', self.BACKUP_STORAGE)
self.backup_repo = options.get('backup_repo', self.BACKUP_REPO)
self.import_file = options.get('import_file', self.IMPORT_FILE)
self.threads = options.get('threads', self.THREADS)
self.map_data = options.get('map_data', self.MAP_DATA)
def __str__(self) -> str:
return str(self.__dict__)
class ImportSettings:
IMPORT_FILE = ''
DOCS_PER_COLLECTION = 0
def __init__(self, options):
self.docs_per_collections = int(options.get('docs_per_collection',
self.DOCS_PER_COLLECTION))
self.import_file = options.get('import_file', self.IMPORT_FILE)
def __str__(self) -> str:
return str(self.__dict__)
class XDCRSettings:
WAN_DELAY = 0
NUM_XDCR_LINKS = 1
XDCR_LINKS_PRIORITY = 'HIGH'
INITIAL_COLLECTION_MAPPING = '' # std format {"scope-1:collection-1":"scope-1:collection-1"}
BACKFILL_COLLECTION_MAPPING = '' # same format as INITIAL_COLLECTION_MAPPING
def __init__(self, options: dict):
self.demand_encryption = options.get('demand_encryption')
self.filter_expression = options.get('filter_expression')
self.secure_type = options.get('secure_type')
self.wan_delay = int(options.get('wan_delay',
self.WAN_DELAY))
self.num_xdcr_links = int(options.get('num_xdcr_links', self.NUM_XDCR_LINKS))
self.xdcr_links_priority = options.get('xdcr_links_priority',
self.XDCR_LINKS_PRIORITY).split(',')
self.initial_collection_mapping = options.get('initial_collection_mapping',
self.INITIAL_COLLECTION_MAPPING)
self.backfill_collection_mapping = options.get('backfill_collection_mapping',
self.BACKFILL_COLLECTION_MAPPING)
self.collections_oso_mode = bool(options.get('collections_oso_mode'))
def __str__(self) -> str:
return str(self.__dict__)
class ViewsSettings:
VIEWS = '[1]'
DISABLED_UPDATES = 0
def __init__(self, options: dict):
self.views = eval(options.get('views', self.VIEWS))
self.disabled_updates = int(options.get('disabled_updates',
self.DISABLED_UPDATES))
self.index_type = options.get('index_type')
def __str__(self) -> str:
return str(self.__dict__)
class GSISettings:
CBINDEXPERF_CONFIGFILE = ''
CBINDEXPERF_CONCURRENCY = 0
CBINDEXPERF_REPEAT = 0
CBINDEXPERF_CONFIGFILES = ''
RUN_RECOVERY_TEST = 0
INCREMENTAL_LOAD_ITERATIONS = 0
SCAN_TIME = 1200
INCREMENTAL_ONLY = 0
REPORT_INITIAL_BUILD_TIME = 0
DISABLE_PERINDEX_STATS = False
def __init__(self, options: dict):
self.indexes = {}
if options.get('indexes') is not None:
myindexes = options.get('indexes')
if ".json" in myindexes:
# index definitions passed in as json file
with open(myindexes) as f:
self.indexes = json.load(f)
else:
for index_def in myindexes.split('#'):
name, field = index_def.split(':')
if '"' in field:
field = field.replace('"', '\\\"')
self.indexes[name] = field
self.cbindexperf_configfile = options.get('cbindexperf_configfile',
self.CBINDEXPERF_CONFIGFILE)
self.cbindexperf_concurrency = int(options.get('cbindexperf_concurrency',
self.CBINDEXPERF_CONCURRENCY))
self.cbindexperf_repeat = int(options.get('cbindexperf_repeat',
self.CBINDEXPERF_REPEAT))
self.cbindexperf_configfiles = options.get('cbindexperf_configfiles',
self.CBINDEXPERF_CONFIGFILES)
self.run_recovery_test = int(options.get('run_recovery_test',
self.RUN_RECOVERY_TEST))
self.incremental_only = int(options.get('incremental_only',
self.INCREMENTAL_ONLY))
self.incremental_load_iterations = int(options.get('incremental_load_iterations',
self.INCREMENTAL_LOAD_ITERATIONS))
self.scan_time = int(options.get('scan_time', self.SCAN_TIME))
self.report_initial_build_time = int(options.get('report_initial_build_time',
self.REPORT_INITIAL_BUILD_TIME))
self.disable_perindex_stats = options.get('disable_perindex_stats',
self.DISABLE_PERINDEX_STATS)
self.settings = {}
for option in options:
if option.startswith(('indexer', 'projector', 'queryport')):
value = options.get(option)
if '.' in value:
self.settings[option] = maybe_atoi(value, t=float)
else:
self.settings[option] = maybe_atoi(value, t=int)
if self.settings:
storage_mode = self.settings.get('indexer.settings.storage_mode')
if storage_mode in ('forestdb', 'plasma'):
self.storage = storage_mode
else:
self.storage = 'memdb'
def __str__(self) -> str:
return str(self.__dict__)
class DCPSettings:
NUM_CONNECTIONS = 4
INVOKE_WARM_UP = 0
def __init__(self, options: dict):
self.num_connections = int(options.get('num_connections',
self.NUM_CONNECTIONS))
self.invoke_warm_up = int(options.get('invoke_warm_up',
self.INVOKE_WARM_UP))
def __str__(self) -> str:
return str(self.__dict__)
class N1QLSettings:
def __init__(self, options: dict):
self.cbq_settings = {
option: maybe_atoi(value) for option, value in options.items()
}
def __str__(self) -> str:
return str(self.__dict__)
class IndexSettings:
FTS_INDEX_NAME = ''
FTS_INDEX_CONFIG_FILE = ''
TOP_DOWN = False
INDEXES_PER_COLLECTION = 1
REPLICAS = 0
def __init__(self, options: dict):
self.raw_statements = options.get('statements')
self.fields = options.get('fields')
self.replicas = int(options.get('replicas', self.REPLICAS))
self.collection_map = options.get('collection_map')
self.indexes_per_collection = int(options.get('indexes_per_collection',
self.INDEXES_PER_COLLECTION))
self.top_down = bool(options.get('top_down', self.TOP_DOWN))
self.couchbase_fts_index_name = options.get('couchbase_fts_index_name',
self.FTS_INDEX_NAME)
self.couchbase_fts_index_configfile = options.get('couchbase_fts_index_configfile',
self.FTS_INDEX_CONFIG_FILE)
self.statements = self.create_index_statements()
def create_index_statements(self) -> List[str]:
# Here we generate all permutations of all subsets of the index fields.
# The total number of such permutations for n fields is:
#
# Sum from k=0 to n of n!/k!
#
# n=3 sum = 16
# n=4 sum = 65
# n=5 sum = 326
# n=6 sum = 1957
#
# The sum counts the empty subset as well; the loop below skips it, so the
# number of CREATE INDEX statements generated per collection is one fewer
# (and is further capped by indexes_per_collection).
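# A quick sanity check of the count above (illustrative only, not executed here):
#
#   >>> from itertools import chain, combinations, permutations
#   >>> fields = ['f1', 'f2', 'f3']
#   >>> combos = chain.from_iterable(combinations(fields, r)
#   ...                              for r in range(1, len(fields) + 1))
#   >>> sum(len(list(permutations(c))) for c in combos)
#   15
#
# i.e. 16 - 1 for n=3, since the empty subset is skipped.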
if self.collection_map and self.fields:
statements = []
build_statements = []
if self.fields.strip() == 'primary':
for bucket in self.collection_map.keys():
for scope in self.collection_map[bucket].keys():
for collection in self.collection_map[bucket][scope].keys():
index_num = 1
if self.collection_map[bucket][scope][collection]['load'] == 1:
collection_num = collection.replace("collection-", "")
index_name = 'pi{}_{}'\
.format(collection_num, index_num)
new_statement = \
"CREATE PRIMARY INDEX {} ON default:`{}`.`{}`.`{}`". \
format(index_name, bucket, scope, collection)
with_clause = " WITH {'defer_build': 'true',"
if self.replicas > 0:
with_clause += "'num_replica': " + str(self.replicas) + ","
with_clause = with_clause[:-1]
with_clause += "}"
new_statement += with_clause
statements.append(new_statement)
build_statement = "BUILD INDEX ON default:`{}`.`{}`.`{}`('{}')" \
.format(bucket, scope, collection, index_name)
build_statements.append(build_statement)
index_num += 1
else:
fields = self.fields.strip().split(',')
parsed_fields = []
index = 1
for field in fields:
while field.count("(") != field.count(")"):
field = ",".join([field, fields[index]])
del fields[index]
index += 1
parsed_fields.append(field)
fields = parsed_fields
field_combos = list(chain.from_iterable(combinations(fields, r)
for r in range(1, len(fields)+1)))
if self.top_down:
field_combos.reverse()
for bucket in self.collection_map.keys():
for scope in self.collection_map[bucket].keys():
for collection in self.collection_map[bucket][scope].keys():
if self.collection_map[bucket][scope][collection]['load'] == 1:
indexes_created = 0
collection_num = collection.replace("collection-", "")
for field_subset in field_combos:
subset_permutations = list(permutations(list(field_subset)))
for permutation in subset_permutations:
index_field_list = list(permutation)
index_name = "i{}_{}".format(collection_num,
str(indexes_created+1))
index_fields = ",".join(index_field_list)
new_statement = \
"CREATE INDEX {} ON default:`{}`.`{}`.`{}`({})".\
format(
index_name,
bucket,
scope,
collection,
index_fields)
with_clause = " WITH {'defer_build': 'true',"
if self.replicas > 0:
with_clause += \
"'num_replica': " + str(self.replicas) + ","
with_clause = with_clause[:-1]
with_clause += "}"
new_statement += with_clause
statements.append(new_statement)
build_statement = \
"BUILD INDEX ON default:`{}`.`{}`.`{}`('{}')" \
.format(bucket, scope, collection, index_name)
build_statements.append(build_statement)
indexes_created += 1
if indexes_created == self.indexes_per_collection:
break
if indexes_created == self.indexes_per_collection:
break
statements = statements + build_statements
return statements
elif self.raw_statements:
return self.raw_statements.strip().split('\n')
elif self.raw_statements is None and self.fields is None:
return []
else:
raise Exception('Index options must include either statements '
'or fields (if collections are enabled)')
@property
def indexes(self):
if self.collection_map:
indexes = []
for statement in self.statements:
match = re.search(r'CREATE .*INDEX (.*) ON', statement)
if match:
indexes.append(match.group(1))
indexes_per_collection = set(indexes)
index_map = {}
for bucket in self.collection_map.keys():
for scope in self.collection_map[bucket].keys():
for collection in self.collection_map[bucket][scope].keys():
if self.collection_map[bucket][scope][collection]['load'] == 1:
bucket_map = index_map.get(bucket, {})
if bucket_map == {}:
index_map[bucket] = {}
scope_map = index_map[bucket].get(scope, {})
if scope_map == {}:
index_map[bucket][scope] = {}
coll_map = index_map[bucket][scope].get(collection, {})
if coll_map == {}:
index_map[bucket][scope][collection] = {}
for index_name in list(indexes_per_collection):
index_map[bucket][scope][collection][index_name] = ""
return index_map
else:
indexes = []
for statement in self.statements:
match = re.search(r'CREATE .*INDEX (.*) ON', statement)
if match:
indexes.append(match.group(1))
return indexes
def __str__(self) -> str:
return str(self.__dict__)
class N1QLFunctionSettings(IndexSettings):
pass
class AccessSettings(PhaseSettings):
OPS = float('inf')
def define_queries(self, config):
queries = []
for query_name in self.n1ql_queries:
query = config.get_n1ql_query_definition(query_name)
queries.append(query)
self.n1ql_queries = queries
class ExtraAccessSettings(PhaseSettings):
OPS = float('inf')
class BackupSettings:
COMPRESSION = False
# Undefined test parameters will use backup's default
THREADS = None
STORAGE_TYPE = None
SINK_TYPE = None
SHARDS = None
OBJ_STAGING_DIR = None
OBJ_REGION = None
AWS_CREDENTIAL_PATH = None
INCLUDE_DATA = None
def __init__(self, options: dict):
self.compression = int(options.get('compression', self.COMPRESSION))
self.threads = options.get('threads', self.THREADS)
self.storage_type = options.get('storage_type', self.STORAGE_TYPE)
self.sink_type = options.get('sink_type', self.SINK_TYPE)
self.shards = options.get('shards', self.SHARDS)
self.obj_staging_dir = options.get('obj_staging_dir', self.OBJ_STAGING_DIR)
self.obj_region = options.get('obj_region', self.OBJ_REGION)
self.aws_credential_path = options.get('aws_credential_path', self.AWS_CREDENTIAL_PATH)
self.include_data = options.get('include_data', self.INCLUDE_DATA)
class ExportSettings:
THREADS = None
IMPORT_FILE = None
TYPE = 'json' # csv or json
FORMAT = 'lines' # lines, list
KEY_FIELD = None
LOG_FILE = None
FIELD_SEPARATOR = None
LIMIT_ROWS = False
SKIP_ROWS = False
INFER_TYPES = False
OMIT_EMPTY = False
ERRORS_LOG = None # error log file
COLLECTION_FIELD = None
SCOPE_FIELD = None
SCOPE_COLLECTION_EXP = None
def __init__(self, options: dict):
self.threads = options.get('threads', self.THREADS)
self.type = options.get('type', self.TYPE)
self.format = options.get('format', self.FORMAT)
self.import_file = options.get('import_file', self.IMPORT_FILE)
self.key_field = options.get('key_field', self.KEY_FIELD)
self.log_file = options.get('log_file', self.LOG_FILE)
self.field_separator = options.get('field_separator',
self.FIELD_SEPARATOR)
self.limit_rows = int(options.get('limit_rows', self.LIMIT_ROWS))
self.skip_rows = int(options.get('skip_rows', self.SKIP_ROWS))
self.infer_types = int(options.get('infer_types', self.INFER_TYPES))
self.omit_empty = int(options.get('omit_empty', self.OMIT_EMPTY))
self.errors_log = options.get('errors_log', self.ERRORS_LOG)
self.collection_field = options.get('collection_field', self.COLLECTION_FIELD)
self.scope_field = options.get('scope_field', self.SCOPE_FIELD)
self.scope_collection_exp = options.get('scope_collection_exp', self.SCOPE_COLLECTION_EXP)
class EventingSettings:
WORKER_COUNT = 3
CPP_WORKER_THREAD_COUNT = 2
TIMER_WORKER_POOL_SIZE = 1
WORKER_QUEUE_CAP = 100000
TIMER_TIMEOUT = 0
TIMER_FUZZ = 0
CONFIG_FILE = "tests/eventing/config/function_sample.json"
REQUEST_URL = "http://172.23.99.247/cgi-bin/text/1kb_text_200ms.py"
def __init__(self, options: dict):
self.functions = {}
if options.get('functions') is not None:
for function_def in options.get('functions').split(','):
name, filename = function_def.split(':')
self.functions[name.strip()] = filename.strip()
self.worker_count = int(options.get("worker_count", self.WORKER_COUNT))
self.cpp_worker_thread_count = int(options.get("cpp_worker_thread_count",
self.CPP_WORKER_THREAD_COUNT))
self.timer_worker_pool_size = int(options.get("timer_worker_pool_size",
self.TIMER_WORKER_POOL_SIZE))
self.worker_queue_cap = int(options.get("worker_queue_cap",
self.WORKER_QUEUE_CAP))
self.timer_timeout = int(options.get("timer_timeout",
self.TIMER_TIMEOUT))
self.timer_fuzz = int(options.get("timer_fuzz",
self.TIMER_FUZZ))
self.config_file = options.get("config_file", self.CONFIG_FILE)
self.request_url = options.get("request_url", self.REQUEST_URL)
def __str__(self) -> str:
return str(self.__dict__)
class MagmaSettings:
COLLECT_PER_SERVER_STATS = 0
def __init__(self, options: dict):
self.collect_per_server_stats = int(options.get("collect_per_server_stats",
self.COLLECT_PER_SERVER_STATS))
class AnalyticsSettings:
NUM_IO_DEVICES = 1
DEFAULT_LOG_LEVEL = "DEBUG"
CACHE_PAGE_SIZE = 131072
STORAGE_COMPRESSION_BLOCK = None
QUERIES = ""
ANALYTICS_CONFIG_FILE = ""
DROP_DATASET = ""
ANALYTICS_LINK = "Local"
def __init__(self, options: dict):
self.num_io_devices = int(options.get('num_io_devices',
self.NUM_IO_DEVICES))
self.log_level = options.get("log_level", self.DEFAULT_LOG_LEVEL)
self.storage_buffer_cache_pagesize = options.get("cache_page_size", self.CACHE_PAGE_SIZE)
self.storage_compression_block = options.get("storage_compression_block",
self.STORAGE_COMPRESSION_BLOCK)
self.queries = options.get("queries", self.QUERIES)
self.analytics_config_file = options.get("analytics_config_file",
self.ANALYTICS_CONFIG_FILE)
self.drop_dataset = options.get("drop_dataset", self.DROP_DATASET)
self.analytics_link = options.get("analytics_link", self.ANALYTICS_LINK)
class AuditSettings:
ENABLED = True
EXTRA_EVENTS = ''
def __init__(self, options: dict):
self.enabled = bool(options.get('enabled', self.ENABLED))
self.extra_events = set(options.get('extra_events',
self.EXTRA_EVENTS).split())
class YCSBSettings:
REPO = 'git://github.com/couchbaselabs/YCSB.git'
BRANCH = 'master'
SDK_VERSION = None
LATENCY_PERCENTILES = [98]
AVERAGE_LATENCY = 0
def __init__(self, options: dict):
self.repo = options.get('repo', self.REPO)
self.branch = options.get('branch', self.BRANCH)
self.sdk_version = options.get('sdk_version', self.SDK_VERSION)
self.latency_percentiles = options.get('latency_percentiles', self.LATENCY_PERCENTILES)
if isinstance(self.latency_percentiles, str):
self.latency_percentiles = [int(x) for x in self.latency_percentiles.split(',')]
self.average_latency = int(options.get('average_latency', self.AVERAGE_LATENCY))
def __str__(self) -> str:
return str(self.__dict__)
class SDKTestingSettings:
ENABLE_SDKTEST = 0
SDK_TYPE = ['java', 'libc', 'python']
def __init__(self, options: dict):
self.enable_sdktest = int(options.get('enable_sdktest', self.ENABLE_SDKTEST))
self.sdk_type = self.SDK_TYPE + options.get('sdk_type', '').split()
def __str__(self) -> str:
return str(self.__dict__)
class ClientSettings:
LIBCOUCHBASE = None
PYTHON_CLIENT = None
def __init__(self, options: dict):
self.libcouchbase = options.get('libcouchbase', self.LIBCOUCHBASE)
self.python_client = options.get('python_client', self.PYTHON_CLIENT)
def __str__(self) -> str:
return str(self.__dict__)
class JavaDCPSettings:
REPO = 'git://github.com/couchbase/java-dcp-client.git'
BRANCH = 'master'
COMMIT = None
STREAM = 'all'
CLIENTS = 1
def __init__(self, options: dict):
self.config = options.get('config')
self.repo = options.get('repo', self.REPO)
self.branch = options.get('branch', self.BRANCH)
self.commit = options.get('commit', self.COMMIT)
self.stream = options.get('stream', self.STREAM)
self.clients = int(options.get('clients', self.CLIENTS))
def __str__(self) -> str:
return str(self.__dict__)
class MagmaBenchmarkSettings:
NUM_KVSTORES = 1
WRITE_BATCHSIZE = 1000
KEY_LEN = 40
DOC_SIZE = 1024
NUM_DOCS = 100000000
NUM_WRITES = 100000000
NUM_READS = 10000000
NUM_READERS = 32
MEM_QUOTA = 1048576
FS_CACHE_SIZE = 5368709120
WRITE_MULTIPLIER = 5
DATA_DIR = "/data"
ENGINE = "magma"
ENGINE_CONFIG = ""
def __init__(self, options: dict):
self.num_kvstores = int(options.get('num_kvstores', self.NUM_KVSTORES))
self.write_batchsize = int(options.get('write_batchsize', self.WRITE_BATCHSIZE))
self.key_len = int(options.get('key_len', self.KEY_LEN))
self.doc_size = int(options.get('doc_size', self.DOC_SIZE))
self.num_docs = int(options.get('num_docs', self.NUM_DOCS))
self.num_writes = int(options.get('num_writes', self.NUM_WRITES))
self.num_reads = int(options.get('num_reads', self.NUM_READS))
self.num_readers = int(options.get('num_readers', self.NUM_READERS))
self.memquota = int(options.get('memquota', self.MEM_QUOTA))
self.fs_cache_size = int(options.get('fs_cache_size', self.FS_CACHE_SIZE))
self.write_multiplier = int(options.get('write_multiplier', self.WRITE_MULTIPLIER))
self.data_dir = options.get('data_dir', self.DATA_DIR)
self.engine = options.get('engine', self.ENGINE)
self.engine_config = options.get('engine_config', self.ENGINE_CONFIG)
def __str__(self) -> str:
return str(self.__dict__)
class TPCDSLoaderSettings:
REPO = 'git://github.com/couchbaselabs/cbas-perf-support.git'
BRANCH = 'master'
def __init__(self, options: dict):
self.repo = options.get('repo', self.REPO)
self.branch = options.get('branch', self.BRANCH)
def __str__(self) -> str:
return str(self.__dict__)
class PYTPCCSettings:
WAREHOUSE = 1
CLIENT_THREADS = 1
DURATION = 600
MULTI_QUERY_NODE = 0
DRIVER = 'n1ql'
QUERY_PORT = '8093'
KV_PORT = '8091'
RUN_SQL_SHELL = 'run_sqlcollections.sh'
CBRINDEX_SQL = 'cbcrindexcollection_replicas3.sql'
COLLECTION_CONFIG = 'cbcrbucketcollection_20GB.sh'
DURABILITY_LEVEL = 'majority'
SCAN_CONSISTENCY = 'not_bounded'
TXTIMEOUT = 3.0
TXT_CLEANUP_WINDOW = 0
PYTPCC_BRANCH = 'py3'
PYTPCC_REPO = 'https://github.com/couchbaselabs/py-tpcc.git'
INDEX_REPLICAS = 0
def __init__(self, options: dict):
self.warehouse = int(options.get('warehouse', self.WAREHOUSE))
self.client_threads = int(options.get('client_threads',
self.CLIENT_THREADS))
self.duration = int(options.get('duration', self.DURATION))
self.multi_query_node = int(options.get('multi_query_node',
self.MULTI_QUERY_NODE))
self.driver = options.get('driver', self.DRIVER)
self.query_port = options.get('query_port', self.QUERY_PORT)
self.kv_port = options.get('kv_port', self.KV_PORT)
self.run_sql_shell = options.get('run_sql_shell', self.RUN_SQL_SHELL)
self.cbrindex_sql = options.get('cbrindex_sql', self.CBRINDEX_SQL)
self.collection_config = options.get('collection_config',
self.COLLECTION_CONFIG)
self.durability_level = options.get('durability_level',
self.DURABILITY_LEVEL)
self.scan_consistency = options.get('scan_consistency',
self.SCAN_CONSISTENCY)
self.txtimeout = options.get('txtimeout', self.TXTIMEOUT)
self.txt_cleanup_window = int(options.get('txt_cleanup_window',
self.TXT_CLEANUP_WINDOW))
self.pytpcc_branch = options.get('pytpcc_branch', self.PYTPCC_BRANCH)
self.pytpcc_repo = options.get('pytpcc_repo', self.PYTPCC_REPO)
self.use_pytpcc_backup = bool(options.get('use_pytpcc_backup'))
self.index_replicas = int(options.get('index_replicas',
self.INDEX_REPLICAS))
def __str__(self) -> str:
return str(self.__dict__)
class TestConfig(Config):
@property
def test_case(self) -> TestCaseSettings:
options = self._get_options_as_dict('test_case')
return TestCaseSettings(options)
@property
def showfast(self) -> ShowFastSettings:
options = self._get_options_as_dict('showfast')
return ShowFastSettings(options)
@property
def cluster(self) -> ClusterSettings:
options = self._get_options_as_dict('cluster')
return ClusterSettings(options)
@property
def bucket(self) -> BucketSettings:
options = self._get_options_as_dict('bucket')
return BucketSettings(options)
@property
def collection(self) -> CollectionSettings:
options = self._get_options_as_dict('collection')
return CollectionSettings(options)
@property
def users(self) -> UserSettings:
options = self._get_options_as_dict('users')
return UserSettings(options)
@property
def bucket_extras(self) -> dict:
bucket_extras = self._get_options_as_dict('bucket_extras')
options = self._get_options_as_dict('access')
access = AccessSettings(options)
if access.durability_set:
if "num_writer_threads" not in bucket_extras:
bucket_extras["num_writer_threads"] = "disk_io_optimized"
return bucket_extras
@property
def buckets(self) -> List[str]:
if self.cluster.num_buckets == 1 and self.cluster.bucket_name != 'bucket-1':
return [self.cluster.bucket_name]
else:
return [
'bucket-{}'.format(i + 1) for i in range(self.cluster.num_buckets)
]
@property
def eventing_buckets(self) -> List[str]:
return [
'eventing-bucket-{}'.format(i + 1) for i in range(self.cluster.eventing_buckets)
]
@property
def eventing_metadata_bucket(self) -> List[str]:
return [
'eventing'
]
@property
def compaction(self) -> CompactionSettings:
options = self._get_options_as_dict('compaction')
return CompactionSettings(options)
@property
def restore_settings(self) -> RestoreSettings:
options = self._get_options_as_dict('restore')
return RestoreSettings(options)
@property
def import_settings(self) -> ImportSettings:
options = self._get_options_as_dict('import')
return ImportSettings(options)
@property
def load_settings(self):
load_options = self._get_options_as_dict('load')
load_settings = LoadSettings(load_options)
client_options = self._get_options_as_dict('clients')
client_settings = ClientSettings(client_options)
if hasattr(client_settings, "pillowfight"):
load_settings.custom_pillowfight = True
collection_options = self._get_options_as_dict('collection')
collection_settings = CollectionSettings(collection_options)
if collection_settings.collection_map is not None:
load_settings.collections = collection_settings.collection_map
return load_settings
@property
def hot_load_settings(self) -> HotLoadSettings:
options = self._get_options_as_dict('hot_load')
hot_load = HotLoadSettings(options)
load = self.load_settings
hot_load.doc_gen = load.doc_gen
hot_load.array_size = load.array_size
hot_load.num_categories = load.num_categories
hot_load.num_replies = load.num_replies
hot_load.size = load.size
hot_load.key_fmtr = load.key_fmtr
client_options = self._get_options_as_dict('clients')
client_settings = ClientSettings(client_options)
if hasattr(client_settings, "pillowfight"):
hot_load.custom_pillowfight = True
collection_options = self._get_options_as_dict('collection')
collection_settings = CollectionSettings(collection_options)
if collection_settings.collection_map is not None:
hot_load.collections = collection_settings.collection_map
return hot_load
@property
def xattr_load_settings(self) -> XattrLoadSettings:
options = self._get_options_as_dict('xattr_load')
return XattrLoadSettings(options)
@property
def xdcr_settings(self) -> XDCRSettings:
options = self._get_options_as_dict('xdcr')
return XDCRSettings(options)
@property
def views_settings(self) -> ViewsSettings:
options = self._get_options_as_dict('views')
return ViewsSettings(options)
@property
def gsi_settings(self) -> GSISettings:
options = self._get_options_as_dict('secondary')
return GSISettings(options)
@property
def dcp_settings(self) -> DCPSettings:
options = self._get_options_as_dict('dcp')
return DCPSettings(options)
@property
def index_settings(self) -> IndexSettings:
options = self._get_options_as_dict('index')
collection_options = self._get_options_as_dict('collection')
collection_settings = CollectionSettings(collection_options)
if collection_settings.collection_map is not None:
options['collection_map'] = collection_settings.collection_map
return IndexSettings(options)
@property
def n1ql_function_settings(self) -> N1QLFunctionSettings:
options = self._get_options_as_dict('n1ql_function')
return N1QLFunctionSettings(options)
@property
def n1ql_settings(self) -> N1QLSettings:
options = self._get_options_as_dict('n1ql')
return N1QLSettings(options)
@property
def backup_settings(self) -> BackupSettings:
options = self._get_options_as_dict('backup')
return BackupSettings(options)
@property
def export_settings(self) -> ExportSettings:
options = self._get_options_as_dict('export')
return ExportSettings(options)
@property
def access_settings(self) -> AccessSettings:
options = self._get_options_as_dict('access')
access = AccessSettings(options)
java_dcp_options = self._get_options_as_dict('java_dcp')
java_dcp_settings = JavaDCPSettings(java_dcp_options)
access.java_dcp_config = java_dcp_settings.config
access.java_dcp_clients = java_dcp_settings.clients
access.java_dcp_stream = java_dcp_settings.stream
client_options = self._get_options_as_dict('clients')
client_settings = ClientSettings(client_options)
if hasattr(client_settings, "pillowfight"):
access.custom_pillowfight = True
user_options = self._get_options_as_dict('users')
user_settings = UserSettings(user_options)
access.users = user_settings.num_users_per_bucket
collection_options = self._get_options_as_dict('collection')
collection_settings = CollectionSettings(collection_options)
if collection_settings.collection_map is not None:
access.collections = collection_settings.collection_map
if access.split_workload is not None:
with open(access.split_workload) as f:
access.split_workload = json.load(f)
if hasattr(access, 'n1ql_queries'):
access.define_queries(self)
load_settings = self.load_settings
access.doc_gen = load_settings.doc_gen
access.doc_groups = load_settings.doc_groups
access.range_distance = load_settings.range_distance
access.array_size = load_settings.array_size
access.num_categories = load_settings.num_categories
access.num_replies = load_settings.num_replies
access.size = load_settings.size
access.key_fmtr = load_settings.key_fmtr
access.bucket_list = self.buckets
return access
@property
def extra_access_settings(self) -> ExtraAccessSettings:
options = self._get_options_as_dict('extra_access')
extra_access = ExtraAccessSettings(options)
java_dcp_options = self._get_options_as_dict('java_dcp')
java_dcp_settings = JavaDCPSettings(java_dcp_options)
extra_access.java_dcp_config = java_dcp_settings.config
extra_access.java_dcp_clients = java_dcp_settings.clients
extra_access.java_dcp_stream = java_dcp_settings.stream
client_options = self._get_options_as_dict('clients')
client_settings = ClientSettings(client_options)
if hasattr(client_settings, "pillowfight"):
extra_access.custom_pillowfight = True
user_options = self._get_options_as_dict('users')
user_settings = UserSettings(user_options)
extra_access.users = user_settings.num_users_per_bucket
collection_options = self._get_options_as_dict('collection')
collection_settings = CollectionSettings(collection_options)
if collection_settings.collection_map is not None:
extra_access.collections = collection_settings.collection_map
load_settings = self.load_settings
extra_access.doc_gen = load_settings.doc_gen
extra_access.range_distance = load_settings.range_distance
extra_access.array_size = load_settings.array_size
extra_access.num_categories = load_settings.num_categories
extra_access.num_replies = load_settings.num_replies
extra_access.size = load_settings.size
extra_access.key_fmtr = load_settings.key_fmtr
extra_access.bucket_list = self.buckets
return extra_access
@property
def rebalance_settings(self) -> RebalanceSettings:
options = self._get_options_as_dict('rebalance')
return RebalanceSettings(options)
@property
def stats_settings(self) -> StatsSettings:
options = self._get_options_as_dict('stats')
return StatsSettings(options)
@property
def profiling_settings(self) -> ProfilingSettings:
options = self._get_options_as_dict('profiling')
return ProfilingSettings(options)
@property
def internal_settings(self) -> dict:
return self._get_options_as_dict('internal')
@property
def xdcr_cluster_settings(self) -> dict:
return self._get_options_as_dict('xdcr_cluster')
@property
def jts_access_settings(self) -> JTSAccessSettings:
options = self._get_options_as_dict('jts')
return JTSAccessSettings(options)
@property
def ycsb_settings(self) -> YCSBSettings:
options = self._get_options_as_dict('ycsb')
return YCSBSettings(options)
@property
def sdktesting_settings(self) -> SDKTestingSettings:
options = self._get_options_as_dict('sdktesting')
return SDKTestingSettings(options)
@property
def eventing_settings(self) -> EventingSettings:
options = self._get_options_as_dict('eventing')
return EventingSettings(options)
@property
def magma_settings(self) -> MagmaSettings:
options = self._get_options_as_dict('magma')
return MagmaSettings(options)
@property
def analytics_settings(self) -> AnalyticsSettings:
options = self._get_options_as_dict('analytics')
return AnalyticsSettings(options)
@property
def audit_settings(self) -> AuditSettings:
options = self._get_options_as_dict('audit')
return AuditSettings(options)
def get_n1ql_query_definition(self, query_name: str) -> dict:
return self._get_options_as_dict('n1ql-{}'.format(query_name))
@property
def fio(self) -> dict:
return self._get_options_as_dict('fio')
@property
def java_dcp_settings(self) -> JavaDCPSettings:
options = self._get_options_as_dict('java_dcp')
return JavaDCPSettings(options)
@property
def client_settings(self) -> ClientSettings:
options = self._get_options_as_dict('clients')
return ClientSettings(options)
@property
def magma_benchmark_settings(self) -> MagmaBenchmarkSettings:
options = self._get_options_as_dict('magma_benchmark')
return MagmaBenchmarkSettings(options)
@property
def tpcds_loader_settings(self) -> TPCDSLoaderSettings:
options = self._get_options_as_dict('TPCDSLoader')
return TPCDSLoaderSettings(options)
@property
def pytpcc_settings(self) -> PYTPCCSettings:
options = self._get_options_as_dict('py_tpcc')
return PYTPCCSettings(options)
class TargetSettings:
def __init__(self, host: str, bucket: str, password: str, prefix: str, cloud: dict = None):
self.password = password
self.node = host
self.bucket = bucket
self.prefix = prefix
self.cloud = cloud
@property
def connection_string(self) -> str:
return 'couchbase://{username}:{password}@{host}/{bucket}'.format(
username=self.bucket, # Backward compatibility
password=self.password,
host=self.node,
bucket=self.bucket,
)
class TargetIterator(Iterable):
def __init__(self,
cluster_spec: ClusterSpec,
test_config: TestConfig,
prefix: str = None):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.prefix = prefix
def __iter__(self) -> Iterator[TargetSettings]:
password = self.test_config.bucket.password
prefix = self.prefix
for master in self.cluster_spec.masters:
for bucket in self.test_config.buckets:
if self.prefix is None:
prefix = target_hash(master)
if self.cluster_spec.dynamic_infrastructure:
yield TargetSettings(master, bucket, password, prefix,
{'cluster_svc': 'cb-example-perf'})
else:
yield TargetSettings(master, bucket, password, prefix)
|
py
|
1a571df2e10af82a1f9d196e2fdf81a5a5a736cb
|
from pymixconf.jsonconf import JSONConf
import os
def test_jsonconf():
loader = JSONConf(config_directory="test/fixtures", environment_key="TEST_ENV")
os.environ["TEST_ENV"] = "dev"
data = loader.load_config()
expected = {
"flask": {
"port": 7000
},
"logging": {
"level": "INFO"
},
"custom": {
"users": {
"enabled": True,
"admins": ["steve"]
}
}
}
print(data)
assert data == expected
|
bzl
|
1a571e3830845b86856c86bca3ba3b4d8e985b70
|
"""
Defines repositories and registers toolchains for versions of the tools built
from source
"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
_ALL_CONTENT = """\
filegroup(
name = "all_srcs",
srcs = glob(["**"]),
visibility = ["//visibility:public"],
)
"""
# buildifier: disable=unnamed-macro
def built_toolchains(cmake_version, make_version, ninja_version):
"""Register toolchains for built tools that will be built from source"""
_cmake_toolchain(cmake_version)
_make_toolchain(make_version)
_ninja_toolchain(ninja_version)
def _cmake_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_cmake_toolchain",
)
if version == "3.19.6":
maybe(
http_archive,
name = "cmake_src",
build_file_content = _ALL_CONTENT,
sha256 = "ec87ab67c45f47c4285f204280c5cde48e1c920cfcfed1555b27fb3b1a1d20ba",
strip_prefix = "cmake-3.19.6",
urls = [
"https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6.tar.gz",
],
)
return
fail("Unsupported cmake version: " + str(version))
def _make_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_make_toolchain",
)
if version == "4.3":
maybe(
http_archive,
name = "gnumake_src",
build_file_content = _ALL_CONTENT,
sha256 = "e05fdde47c5f7ca45cb697e973894ff4f5d79e13b750ed57d7b66d8defc78e19",
strip_prefix = "make-4.3",
urls = [
"http://ftpmirror.gnu.org/gnu/make/make-4.3.tar.gz",
],
)
return
fail("Unsupported make version: " + str(version))
def _ninja_toolchain(version):
native.register_toolchains(
"@rules_foreign_cc//toolchains:built_ninja_toolchain",
)
if version == "1.10.2":
maybe(
http_archive,
name = "ninja_build_src",
build_file_content = _ALL_CONTENT,
sha256 = "ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed",
strip_prefix = "ninja-1.10.2",
urls = [
"https://github.com/ninja-build/ninja/archive/v1.10.2.tar.gz",
],
)
return
fail("Unsupported ninja version: " + str(version))
|
py
|
1a571e828a9bf81598d2da0bec7a22064bb1c4fa
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import json
import logging
import os
import time
import dateutil.tz
METRICS_DIR = os.environ.get("SAGEMAKER_METRICS_DIRECTORY", ".")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SageMakerFileMetricsWriter(object):
"""Writes metric data to file."""
def __init__(self, metrics_file_path=None):
self._metrics_file_path = metrics_file_path
self._file = None
self._closed = False
def log_metric(self, metric_name, value, timestamp=None, iteration_number=None):
"""Write a metric to file.
Args:
metric_name (str): The name of the metric.
value (str): The value of the metric.
timestamp (datetime): Timestamp of the metric.
iteration_number (int): Iteration number of the metric.
Raises:
SageMakerMetricsWriterException: If the metrics file is closed.
"""
raw_metric_data = _RawMetricData(
metric_name=metric_name, value=value, timestamp=timestamp, iteration_number=iteration_number
)
try:
logging.debug("Writing metric: %s", raw_metric_data)
self._file.write(json.dumps(raw_metric_data.to_record()))
self._file.write("\n")
except AttributeError:
if self._closed:
raise SageMakerMetricsWriterException("log_metric called on a closed writer")
elif not self._file:
self._file = open(self._get_metrics_file_path(), "a", buffering=1)
self._file.write(json.dumps(raw_metric_data.to_record()))
self._file.write("\n")
else:
raise
def close(self):
"""Closes the metric file."""
if not self._closed and self._file:
self._file.close()
self._file = None # invalidate reference, causing subsequent log_metric to fail.
self._closed = True
def __enter__(self):
"""Return self"""
return self
def __exit__(self, type, value, traceback):
"""Execute self.close()"""
self.close()
def __del__(self):
"""Execute self.close()"""
self.close()
def _get_metrics_file_path(self):
pid_filename = "{}.json".format(str(os.getpid()))
metrics_file_path = self._metrics_file_path or os.path.join(METRICS_DIR, pid_filename)
logging.debug("metrics_file_path=" + metrics_file_path)
return metrics_file_path
class SageMakerMetricsWriterException(Exception):
"""SageMakerMetricsWriterException"""
def __init__(self, message, errors=None):
super().__init__(message)
if errors:
self.errors = errors
class _RawMetricData(object):
MetricName = None
Value = None
Timestamp = None
IterationNumber = None
def __init__(self, metric_name, value, timestamp=None, iteration_number=None):
if timestamp is None:
timestamp = time.time()
elif isinstance(timestamp, datetime.datetime):
# If the input is a datetime then convert it to UTC time. Assume a naive datetime is in local timezone
if not timestamp.tzinfo:
timestamp = timestamp.replace(tzinfo=dateutil.tz.tzlocal())
timestamp = (timestamp - timestamp.utcoffset()).replace(tzinfo=datetime.timezone.utc)
timestamp = timestamp.timestamp()
else:
timestamp = float(timestamp)
if timestamp < (time.time() - 1209600) or timestamp > (time.time() + 7200):
raise ValueError(
"Supplied timestamp %f is invalid."
" Timestamps must be between two weeks before and two hours from now." % timestamp
)
value = float(value)
self.MetricName = metric_name
self.Value = float(value)
self.Timestamp = timestamp
if iteration_number is not None:
assert isinstance(iteration_number, int)
self.IterationNumber = iteration_number
def to_record(self):
return self.__dict__
def __str__(self):
return repr(self)
def __repr__(self):
return "{}({})".format(
type(self).__name__,
",".join(["{}={}".format(k, repr(v)) for k, v in vars(self).items()]),
)
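# Hedged usage sketch (not part of the original module): writing a couple of
# metrics with the file writer. The metric names and values are placeholders;
# output lands in SAGEMAKER_METRICS_DIRECTORY (default ".") as <pid>.json.
if __name__ == "__main__":
    with SageMakerFileMetricsWriter() as writer:
        writer.log_metric("train:loss", 0.42, iteration_number=1)
        writer.log_metric("train:loss", 0.31, iteration_number=2)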
|
py
|
1a572152323f49a7ec7ba1a99b0ec9790322b751
|
from igramscraper.instagram import Instagram
import urllib.request
import argparse
import os
def get_media_from_hashtag(tag, media_type, quality, max_images, path):
instagram = Instagram()
medias = instagram.get_medias_by_tag(tag, count=max_images)
count = 1
for media in medias:
        media.type = 'image' if media.type in ('sidecar', 'carousel') else media.type
        url = None  # avoid reusing the previous iteration's URL when nothing matches
        # Extracting Image URL
        if media.type == 'image' and media_type in ('image', 'all') and not media.is_ad:
            # Get the links from media
all_quality = ['low', 'standard', 'high']
url = media.__getattribute__(f"image_{quality}_resolution_url")
# If the preferred quality is not available
if not url:
all_quality.remove(quality)
for q in all_quality:
url = media.__getattribute__(
f"image_{q}_resolution_url")
if url:
break
# Extracting Video URL
        if media.type == 'video' and media_type in ('video', 'all') and not media.is_ad:
            # Get the links from media
media = instagram.get_media_by_id(media.identifier)
url = media.video_standard_resolution_url or media.video_low_bandwidth_url or media.video_low_resolution_url or media.video_url
# Downloading the media
if url:
urllib.request.urlretrieve(
url, f"{path}/{media.type}s/{media.type}{count}.{'jpg' if media.type == 'image' else 'mp4'}")
print(f"{count}/{max_images} media downloaded")
else:
print(
f"[{count}] Failed downloading the media {media.link} (id - {media.identifier})")
count += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Get All Post From Instagram Hashtag')
parser.add_argument('-t', '--tag', required=True, help="valid tag name")
parser.add_argument('-p', '--path', required=False,
help="Path to save media", default="media")
parser.add_argument('-mm', '--max-media', required=False,
help="Max number of media to download", type=int, default=10)
parser.add_argument('-mt', '--media-type', required=False,
help="For Photos => `image` Videos => `video` All => `all` ", default="all")
parser.add_argument('-q', '--quality', required=False,
help="Media Quality Use either of `low`, `standard` or `high`", default="standard")
arguments = parser.parse_args()
# Checking
if arguments.media_type not in ["video", "image", "all"]:
raise ValueError("Media Type should be either videos, images or all")
if arguments.quality not in ["low", "high", "standard"]:
raise ValueError("Quality should be either low, standard or high")
if not os.path.exists(arguments.path):
print("Media path not found! \nCreating media path!")
os.mkdir(arguments.path)
if not os.path.exists(arguments.path + "/images"):
os.mkdir(arguments.path + "/images")
if not os.path.exists(arguments.path + "/videos"):
os.mkdir(arguments.path + "/videos")
# Running
get_media_from_hashtag(tag=arguments.tag, media_type=arguments.media_type,
quality=arguments.quality, max_images=arguments.max_media, path=arguments.path)
|
py
|
1a572160f0301fac23be1e029739d5f74a100662
|
from cricsheet.io_xml.parsers.parser import Parser
class MatchParser(Parser):
def __init__(self, match_id):
self.match_id = match_id
self.metadata_parser = MatchMetadataParser()
self.outcome_parser = MatchOutcomeParser()
self.umpire_parser = UmpireParser()
def parse(self, raw):
data = {'id': self.match_id}
data.update(self.metadata_parser.parse(raw))
data.update(self.outcome_parser.parse(raw['outcome']))
if 'umpires' in raw:
data.update(self.umpire_parser.parse(raw['umpires']['umpire']))
return data
class MatchMetadataParser(Parser):
def __init__(self):
pass
def parse(self, metadata):
if 'player_of_match' in metadata:
if type(metadata['player_of_match']['player_of_match']) == list:
player_of_match = metadata['player_of_match']['player_of_match'][0]
else:
player_of_match = metadata['player_of_match']['player_of_match']
else:
player_of_match = None
return {
'gender': metadata['gender'],
'match_type': metadata['match_type'],
'competition': metadata['competition'] if 'competition' in metadata else None,
'max_overs': metadata['overs'] if 'overs' in metadata else None,
'venue': metadata['venue'] if 'venue' in metadata else None,
'city': metadata['city'] if 'city' in metadata else None,
'start_date': metadata['dates']['date'][0],
'end_date': metadata['dates']['date'][-1],
'team_home': metadata['teams']['team'][0],
'team_away': metadata['teams']['team'][1],
'player_of_match': player_of_match,
'toss_won_by': metadata['toss']['winner'],
'toss_decision': metadata['toss']['decision']
}
class MatchOutcomeParser(Parser):
def __init__(self):
pass
def parse(self, outcome):
has_winner = (any(result in outcome for result in ('winner', 'eliminator')))
result = 'win' if has_winner else outcome['result']
method = outcome['method'] if 'method' in outcome else \
'eliminator' if 'eliminator' in outcome else None
winner = outcome['winner'] if 'winner' in outcome else \
outcome['eliminator'] if 'eliminator' in outcome else None
by = outcome['by'] if 'by' in outcome else None
#print(has_winner, result, method, winner, by)
if (not has_winner) or ('eliminator' in outcome) or (method == 'Awarded'):
won_by_type = None
won_by_value = None
elif by is not None:
if ('innings' in by) and ('wickets' in by):
won_by_type = 'innings_and_wickets'
won_by_value = by['wickets']
elif ('innings' not in by) and ('wickets' in by):
won_by_type = 'wickets'
won_by_value = by['wickets']
elif ('innings' in by) and ('runs' in by):
won_by_type = 'innings_and_runs'
won_by_value = by['runs']
elif ('innings' not in by) and ('runs' in by):
won_by_type = 'runs'
won_by_value = by['runs']
else:
won_by_type = None
won_by_value = None
return {
'result': result,
'method': method,
'winner': winner,
'won_by_type': won_by_type,
'won_by_value': won_by_value
}
class UmpireParser(Parser):
def __init__(self):
pass
def parse(self, umpires):
if len(umpires) == 2:
first, second = umpires
third, forth = None, None
if len(umpires) == 3:
first, second, third = umpires
forth = None
if len(umpires) == 4:
first, second, third, forth = umpires
return {
'umpire_first': first,
'umpire_second': second,
'umpire_third': third,
'umpire_forth': forth
}
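# Hedged usage sketch (not part of the original module): running
# MatchOutcomeParser on a hand-written outcome dict that mirrors the cricsheet
# structure the parsers above expect.
if __name__ == '__main__':
    outcome = {'winner': 'India', 'by': {'runs': '25'}}
    print(MatchOutcomeParser().parse(outcome))
    # -> {'result': 'win', 'method': None, 'winner': 'India',
    #     'won_by_type': 'runs', 'won_by_value': '25'}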
|
py
|
1a5721a288955d3c0b1a284ee675b97d84c15f07
|
class SimpleClassWithBlankParentheses():
pass
class ClassWithSpaceParentheses ( ):
first_test_data = 90
second_test_data = 100
def test_func(self):
return None
class ClassWithEmptyFunc(object):
def func_with_blank_parentheses():
return 5
def public_func_with_blank_parentheses():
return None
def class_under_the_func_with_blank_parentheses():
class InsideFunc():
pass
class NormalClass (
):
def func_for_testing(self, first, second):
sum = first + second
return sum
# output
class SimpleClassWithBlankParentheses:
pass
class ClassWithSpaceParentheses:
first_test_data = 90
second_test_data = 100
def test_func(self):
return None
class ClassWithEmptyFunc(object):
def func_with_blank_parentheses():
return 5
def public_func_with_blank_parentheses():
return None
def class_under_the_func_with_blank_parentheses():
class InsideFunc:
pass
class NormalClass:
def func_for_testing(self, first, second):
sum = first + second
return sum
|
py
|
1a5721ee958b655e75e9eb1bf66e8537c61a57be
|
"""Remove processed notebooks from disk"""
import argparse
import shutil
from reproducemegit.jupyter_reproducibility import config
import os
from reproducemegit.jupyter_reproducibility import consts
from reproducemegit.jupyter_reproducibility.db import Repository, Notebook, connect
from reproducemegit.jupyter_reproducibility.utils import vprint, StatusLogger, mount_basedir, check_exit, savepid
def apply(session, status, keep, count, interval, reverse, check):
"""Compress repositories"""
filters = [
Repository.processed.op('&')(consts.R_COMPRESS_OK) == 0,
]
if interval:
filters += [
Repository.id >= interval[0],
Repository.id <= interval[1],
]
query = session.query(Repository).filter(*filters)
if count:
print(query.count())
return
if reverse:
query = query.order_by(
Repository.id.desc()
)
else:
query = query.order_by(
Repository.id.asc()
)
for repository in query:
if check_exit(check):
vprint(0, "Found .exit file. Exiting")
return
status.report()
vprint(0, "Compressing {}".format(repository))
vprint(1, "Into {}".format(repository.zip_path))
with mount_basedir():
try:
if repository.path.exists():
commit = repository.get_commit()
if commit != repository.commit:
repository.processed |= consts.R_COMMIT_MISMATCH
repository.processed |= consts.R_COMPRESS_ERROR
if repository.zip_path.exists() or repository.compress():
if repository.processed & consts.R_COMPRESS_ERROR:
repository.processed -= consts.R_COMPRESS_ERROR
if not keep:
shutil.rmtree(str(repository.path), ignore_errors=True)
elif not repository.zip_path.exists():
if repository.processed & consts.R_COMPRESS_ERROR:
repository.processed -= consts.R_COMPRESS_ERROR
if not repository.path.exists():
repository.processed |= consts.R_UNAVAILABLE_FILES
vprint(1, "failed")
if repository.zip_path.exists():
vprint(1, "ok")
repository.processed |= consts.R_COMPRESS_OK
except Exception as err:
vprint(1, "Failed: {}".format(err))
session.add(repository)
status.count += 1
session.commit()
def main():
"""Main function"""
script_name = os.path.basename(__file__)[:-3]
parser = argparse.ArgumentParser(
description="Compress processed repositories")
parser.add_argument("-v", "--verbose", type=int, default=config.VERBOSE,
help="increase output verbosity")
parser.add_argument("-z", "--compression", type=str,
default=config.COMPRESSION,
help="compression algorithm")
parser.add_argument("-e", "--retry-errors", action='store_true',
help="retry errors")
parser.add_argument("-i", "--interval", type=int, nargs=2,
default=config.REPOSITORY_INTERVAL,
help="id interval")
parser.add_argument("-c", "--count", action='store_true',
help="count results")
parser.add_argument('-r', '--reverse', action='store_true',
help='iterate in reverse order')
parser.add_argument('-k', '--keep-uncompressed', action='store_true',
help='keep uncompressed files')
parser.add_argument('--check', type=str, nargs='*',
default={'all', script_name, script_name + '.py'},
help='check name in .exit')
args = parser.parse_args()
config.VERBOSE = args.verbose
status = None
if not args.count:
status = StatusLogger(script_name)
status.report()
config.COMPRESSION = args.compression
with connect() as session, savepid():
apply(
session,
status,
args.keep_uncompressed,
args.count,
args.interval,
args.reverse,
set(args.check)
)
if __name__ == "__main__":
main()
|
py
|
1a572274b15de5a4099b3e316811e7da64b6d960
|
# Lab 4 Multi-variable linear regression
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
# Make sure the shape and data are OK
print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data)
# placeholders for tensors that will always be fed.
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis
hypothesis = tf.matmul(X, W) + b
# Simplified cost function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
for step in range(2001):
feed = {X: x_data, Y: y_data}
sess.run(train, feed_dict=feed)
if step % 10 == 0:
print(step, "Cost: ", sess.run(cost, feed_dict=feed),
"\nPrediction:\n", sess.run(hypothesis, feed_dict=feed))
# Ask my score
score = np.array([[100, 70, 101]])
print("Your score will be ", sess.run(hypothesis, feed_dict={X: score}))
score = np.array([[60, 70, 110], [90, 100, 80]])
print("Other scores will be ", sess.run(hypothesis, feed_dict={X: score}))
'''
Your score will be [[ 181.73277283]]
Other scores will be [[ 145.86265564]
[ 187.23129272]]
'''
|
py
|
1a5722d03546ae810abd263f6b8b5d1ea0bb6a52
|
import os
import subprocess
import glob
import hashlib
import shutil
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
android_packages = ("ai.comma.plus.offroad",)
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"], encoding='utf8').strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v, k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def start_offroad():
set_package_permissions()
system("am start -n ai.comma.plus.offroad/.MainActivity")
def set_package_permissions():
try:
output = subprocess.check_output(['dumpsys', 'package', 'ai.comma.plus.offroad'], encoding="utf-8")
given_permissions = output.split("runtime permissions")[1]
except Exception:
given_permissions = ""
wanted_permissions = ["ACCESS_FINE_LOCATION", "READ_PHONE_STATE", "READ_EXTERNAL_STORAGE"]
for permission in wanted_permissions:
if permission not in given_permissions:
pm_grant("ai.comma.plus.offroad", "android.permission." + permission)
appops_set("ai.comma.plus.offroad", "SU", "allow")
appops_set("ai.comma.plus.offroad", "WIFI_SCAN", "allow")
def appops_set(package, op, mode):
system(f"LD_LIBRARY_PATH= appops set {package} {op} {mode}")
def pm_grant(package, permission):
system(f"pm grant {package} {permission}")
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
# *** external functions ***
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
if __name__ == "__main__":
update_apks()
|
py
|
1a5723ca932e86aa088cfb33ea9a64fdb7cad210
|
import time
from cpo import *
def run_demo():
Sols = [
BadRWSolution,
ReadFavouredRWSolution,
WriteFavouredSolution,
BalancedRWSolution,
]
    # Return the finished solution instances so main() can report their results.
    return [run_solution(Sol) for Sol in Sols]
def run_solution(Solution):
N_READERS = 20
N_WRITERS = 20
DATA_LEN = 10000
RUN_TIME = 5 # seconds
sol = Solution(DATA_LEN)
sol.spawn_readers(N_READERS)
sol.spawn_writers(N_WRITERS)
time.sleep(RUN_TIME)
sol.kill()
return sol
def main():
    sols = run_demo()
    for sol in sols:
        print(
            sol.describe(),
            'We got...',
            sol.get_result()
        )
class RWSolution:
def __init__(self, data_len):
self.data = [0] * data_len
self.n_reads = AtomicNum(0)
self.n_writes = AtomicNum(0)
self.n_corrupted = AtomicNum(0)
self._kill = False
def do_read(self, idx):
val = self.data[0]
for i in range(len(self.data)):
if val != self.data[i]:
self.n_corrupted.inc(1)
break
self.n_reads.inc(1)
def do_write(self, idx):
for i in range(len(self.data)):
self.data[i] = idx
self.n_writes.inc(1)
def get_result(self):
return {
'writes': self.n_writes.get(),
'reads': self.n_reads.get(),
'corrupt reads': self.n_corrupted.get(),
}
def spawn_readers(self, num_readers):
fork_procs(range(num_readers))(self.reader_work)
def spawn_writers(self, num_writers):
fork_procs(range(num_writers))(self.writer_work)
def kill(self):
self._kill = True
@repeat
def reader_work(self, idx):
if self._kill:
raise Stopped
self.before_read(idx)
self.do_read(idx)
self.after_read(idx)
@repeat
def writer_work(self, idx):
if self._kill:
raise Stopped
self.before_write(idx)
self.do_write(idx)
self.after_write(idx)
@staticmethod
def describe() -> str:
raise NotImplementedError
def before_read(self, idx):
raise NotImplementedError
def after_read(self, idx):
raise NotImplementedError
def before_write(self, idx):
raise NotImplementedError
def after_write(self, idx):
raise NotImplementedError
class BadRWSolution(RWSolution):
def __init__(self, data_len):
super().__init__(data_len)
@staticmethod
def describe() -> str:
return "With no protection."
def before_read(self, idx):
pass
def after_read(self, idx):
pass
def before_write(self, idx):
pass
def after_write(self, idx):
pass
class ReadFavouredRWSolution(RWSolution):
def __init__(self, data_len):
super().__init__(data_len)
self.read_count = 0
self.read_semaphore = BooleanSemaphore(True)
self.write_semaphore = BooleanSemaphore(True)
@staticmethod
def describe() -> str:
return "With protection favouring reads."
def before_read(self, idx):
self.read_semaphore.acquire()
self.read_count += 1
if self.read_count == 1:
self.write_semaphore.acquire()
self.read_semaphore.release()
def after_read(self, idx):
self.read_semaphore.acquire()
self.read_count -= 1
if self.read_count == 0:
self.write_semaphore.release()
self.read_semaphore.release()
def before_write(self, idx):
self.write_semaphore.acquire()
def after_write(self, idx):
self.write_semaphore.release()
class WriteFavouredSolution(RWSolution):
def __init__(self, data_len):
super().__init__(data_len)
self.read_count = 0
self.write_count = 0
self.read_semaphore = BooleanSemaphore(True)
self.write_semaphore = BooleanSemaphore(True)
self.read_try_semaphore = BooleanSemaphore(True)
self.resource_semaphore = BooleanSemaphore(True)
@staticmethod
def describe() -> str:
return "With protection favouring writes."
def before_read(self, idx):
self.read_try_semaphore.acquire()
self.read_semaphore.acquire()
self.read_count += 1
if self.read_count == 1:
self.resource_semaphore.acquire()
self.read_semaphore.release()
self.read_try_semaphore.release()
def after_read(self, idx):
self.read_semaphore.acquire()
self.read_count -= 1
if self.read_count == 0:
self.resource_semaphore.release()
self.read_semaphore.release()
def before_write(self, idx):
self.write_semaphore.acquire()
self.write_count += 1
if self.write_count == 1:
self.read_try_semaphore.acquire()
self.write_semaphore.release()
self.resource_semaphore.acquire()
def after_write(self, idx):
self.resource_semaphore.release()
self.write_semaphore.acquire()
self.write_count -= 1
if self.write_count == 0:
self.read_try_semaphore.release()
self.write_semaphore.release()
class BalancedRWSolution(RWSolution):
def __init__(self, data_len):
super().__init__(data_len)
self.read_count = 0
self.resource_semaphore = BooleanSemaphore(True)
self.read_semaphore = BooleanSemaphore(True)
self.queue_semaphore = BooleanSemaphore(True)
@staticmethod
def describe() -> str:
return "With balanced protection."
def before_read(self, idx):
self.queue_semaphore.acquire()
self.read_semaphore.acquire()
self.read_count += 1
if self.read_count == 1:
self.resource_semaphore.acquire()
self.queue_semaphore.release()
self.read_semaphore.release()
def after_read(self, idx):
self.read_semaphore.acquire()
self.read_count -= 1
if self.read_count == 0:
self.resource_semaphore.release()
self.read_semaphore.release()
def before_write(self, idx):
self.queue_semaphore.acquire()
self.resource_semaphore.acquire()
self.queue_semaphore.release()
def after_write(self, idx):
self.resource_semaphore.release()
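# Hedged single-process sketch (not part of the original module): drives one
# solution's read/write path directly, without forking reader/writer processes.
# Assumes cpo's BooleanSemaphore(True) starts in the "available" state, as the
# classes above already rely on.
def smoke_test(Solution=BalancedRWSolution, data_len=100):
    sol = Solution(data_len)
    sol.before_write(0)
    sol.do_write(0)
    sol.after_write(0)
    sol.before_read(0)
    sol.do_read(0)
    sol.after_read(0)
    return sol.get_result()  # expect 1 write, 1 read, 0 corrupt reads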
if __name__ == '__main__':
main()
|
py
|
1a5723f0e631dc17c4789b8ea68e2ecdda8b5ea6
|
from unittest import mock
import pytest
from django.contrib.auth.models import AnonymousUser
from django.test import override_settings
from ...account import CustomerEvents
from ...account.models import CustomerEvent
from ...core.exceptions import InsufficientStock
from ...core.notify_events import NotifyEventType
from ...core.taxes import zero_money, zero_taxed_money
from ...giftcard import GiftCardEvents
from ...giftcard.models import GiftCard, GiftCardEvent
from ...order import OrderEvents
from ...order.models import OrderEvent
from ...order.notifications import get_default_order_payload
from ...plugins.manager import get_plugins_manager
from ...product.models import ProductTranslation, ProductVariantTranslation
from ...tests.utils import flush_post_commit_hooks
from .. import calculations
from ..complete_checkout import _create_order, _prepare_order_data
from ..fetch import fetch_checkout_info, fetch_checkout_lines
from ..utils import add_variant_to_checkout
@mock.patch("saleor.plugins.manager.PluginsManager.notify")
def test_create_order_captured_payment_creates_expected_events(
mock_notify,
checkout_with_item,
customer_user,
shipping_method,
payment_txn_captured,
channel_USD,
):
checkout = checkout_with_item
checkout_user = customer_user
    # Ensure no events exist beforehand
assert not OrderEvent.objects.exists()
assert not CustomerEvent.objects.exists()
# Prepare valid checkout
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_shipping_address
checkout.shipping_method = shipping_method
checkout.payments.add(payment_txn_captured)
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
# Place checkout
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user,
app=None,
manager=manager,
)
flush_post_commit_hooks()
(
order_placed_event,
payment_captured_event,
order_fully_paid_event,
order_confirmed_event,
) = order.events.all() # type: OrderEvent
# Ensure the correct order event was created
# is the event the expected type
assert order_placed_event.type == OrderEvents.PLACED
# is the user anonymous/ the customer
assert order_placed_event.user == checkout_user
# is the associated backref order valid
assert order_placed_event.order is order
# ensure a date was set
assert order_placed_event.date
# should not have any additional parameters
assert not order_placed_event.parameters
# Ensure the correct order event was created
# is the event the expected type
assert payment_captured_event.type == OrderEvents.PAYMENT_CAPTURED
# is the user anonymous/ the customer
assert payment_captured_event.user == checkout_user
# is the associated backref order valid
assert payment_captured_event.order is order
# ensure a date was set
assert payment_captured_event.date
    # should carry the payment amount, id and gateway as parameters
assert "amount" in payment_captured_event.parameters.keys()
assert "payment_id" in payment_captured_event.parameters.keys()
assert "payment_gateway" in payment_captured_event.parameters.keys()
# Ensure the correct order event was created
# is the event the expected type
assert order_fully_paid_event.type == OrderEvents.ORDER_FULLY_PAID
# is the user anonymous/ the customer
assert order_fully_paid_event.user == checkout_user
# is the associated backref order valid
assert order_fully_paid_event.order is order
# ensure a date was set
assert order_fully_paid_event.date
# should not have any additional parameters
assert not order_fully_paid_event.parameters
expected_order_payload = {
"order": get_default_order_payload(order, checkout.redirect_url),
"recipient_email": order.get_customer_email(),
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
expected_payment_payload = {
"order": get_default_order_payload(order),
"recipient_email": order.get_customer_email(),
"payment": {
"created": payment_txn_captured.created,
"modified": payment_txn_captured.modified,
"charge_status": payment_txn_captured.charge_status,
"total": payment_txn_captured.total,
"captured_amount": payment_txn_captured.captured_amount,
"currency": payment_txn_captured.currency,
},
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
# Ensure the correct order confirmed event was created
# should be order confirmed event
assert order_confirmed_event.type == OrderEvents.CONFIRMED
# ensure the user is checkout user
assert order_confirmed_event.user == checkout_user
# ensure the order confirmed event is related to order
assert order_confirmed_event.order is order
# ensure a date was set
assert order_confirmed_event.date
# ensure the event parameters are empty
assert order_confirmed_event.parameters == {}
mock_notify.assert_has_calls(
[
mock.call(
NotifyEventType.ORDER_CONFIRMATION,
expected_order_payload,
channel_slug=channel_USD.slug,
),
mock.call(
NotifyEventType.ORDER_PAYMENT_CONFIRMATION,
expected_payment_payload,
channel_slug=channel_USD.slug,
),
],
any_order=True,
)
# Ensure the correct customer event was created if the user was not anonymous
placement_event = customer_user.events.get() # type: CustomerEvent
assert placement_event.type == CustomerEvents.PLACED_ORDER # check the event type
assert placement_event.user == customer_user # check the backref is valid
assert placement_event.order == order # check the associated order is valid
assert placement_event.date # ensure a date was set
assert not placement_event.parameters # should not have any additional parameters
@mock.patch("saleor.plugins.manager.PluginsManager.notify")
def test_create_order_captured_payment_creates_expected_events_anonymous_user(
mock_notify,
checkout_with_item,
customer_user,
shipping_method,
payment_txn_captured,
channel_USD,
):
checkout = checkout_with_item
checkout_user = None
    # Ensure no events exist beforehand
assert not OrderEvent.objects.exists()
assert not CustomerEvent.objects.exists()
# Prepare valid checkout
checkout.user = checkout_user
checkout.email = "[email protected]"
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_shipping_address
checkout.shipping_method = shipping_method
checkout.payments.add(payment_txn_captured)
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
# Place checkout
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=AnonymousUser(),
app=None,
manager=manager,
)
flush_post_commit_hooks()
(
order_placed_event,
payment_captured_event,
order_fully_paid_event,
order_confirmed_event,
) = order.events.all() # type: OrderEvent
# Ensure the correct order event was created
# is the event the expected type
assert order_placed_event.type == OrderEvents.PLACED
# is the user anonymous/ the customer
assert order_placed_event.user == checkout_user
# is the associated backref order valid
assert order_placed_event.order is order
# ensure a date was set
assert order_placed_event.date
# should not have any additional parameters
assert not order_placed_event.parameters
# Ensure the correct order event was created
# is the event the expected type
assert payment_captured_event.type == OrderEvents.PAYMENT_CAPTURED
# is the user anonymous/ the customer
assert payment_captured_event.user == checkout_user
# is the associated backref order valid
assert payment_captured_event.order is order
# ensure a date was set
assert payment_captured_event.date
    # should carry the payment amount, id and gateway as parameters
assert "amount" in payment_captured_event.parameters.keys()
assert "payment_id" in payment_captured_event.parameters.keys()
assert "payment_gateway" in payment_captured_event.parameters.keys()
# Ensure the correct order event was created
# is the event the expected type
assert order_fully_paid_event.type == OrderEvents.ORDER_FULLY_PAID
# is the user anonymous/ the customer
assert order_fully_paid_event.user == checkout_user
# is the associated backref order valid
assert order_fully_paid_event.order is order
# ensure a date was set
assert order_fully_paid_event.date
# should not have any additional parameters
assert not order_fully_paid_event.parameters
expected_order_payload = {
"order": get_default_order_payload(order, checkout.redirect_url),
"recipient_email": order.get_customer_email(),
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
expected_payment_payload = {
"order": get_default_order_payload(order),
"recipient_email": order.get_customer_email(),
"payment": {
"created": payment_txn_captured.created,
"modified": payment_txn_captured.modified,
"charge_status": payment_txn_captured.charge_status,
"total": payment_txn_captured.total,
"captured_amount": payment_txn_captured.captured_amount,
"currency": payment_txn_captured.currency,
},
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
# Ensure the correct order confirmed event was created
# should be order confirmed event
assert order_confirmed_event.type == OrderEvents.CONFIRMED
# ensure the user is checkout user
assert order_confirmed_event.user == checkout_user
# ensure the order confirmed event is related to order
assert order_confirmed_event.order is order
# ensure a date was set
assert order_confirmed_event.date
# ensure the event parameters are empty
assert order_confirmed_event.parameters == {}
mock_notify.assert_has_calls(
[
mock.call(
NotifyEventType.ORDER_CONFIRMATION,
expected_order_payload,
channel_slug=channel_USD.slug,
),
mock.call(
NotifyEventType.ORDER_PAYMENT_CONFIRMATION,
expected_payment_payload,
channel_slug=channel_USD.slug,
),
],
any_order=True,
)
# Check no event was created if the user was anonymous
assert not CustomerEvent.objects.exists() # should not have created any event
@mock.patch("saleor.plugins.manager.PluginsManager.notify")
def test_create_order_preauth_payment_creates_expected_events(
mock_notify,
checkout_with_item,
customer_user,
shipping_method,
payment_txn_preauth,
channel_USD,
):
checkout = checkout_with_item
checkout_user = customer_user
    # Ensure no events exist beforehand
assert not OrderEvent.objects.exists()
assert not CustomerEvent.objects.exists()
# Prepare valid checkout
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_shipping_address
checkout.shipping_method = shipping_method
checkout.payments.add(payment_txn_preauth)
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
# Place checkout
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user,
app=None,
manager=manager,
)
flush_post_commit_hooks()
(
order_placed_event,
payment_authorized_event,
order_confirmed_event,
) = order.events.all() # type: OrderEvent
# Ensure the correct order event was created
# is the event the expected type
assert order_placed_event.type == OrderEvents.PLACED
# is the user anonymous/ the customer
assert order_placed_event.user == checkout_user
# is the associated backref order valid
assert order_placed_event.order is order
# ensure a date was set
assert order_placed_event.date
# should not have any additional parameters
assert not order_placed_event.parameters
# Ensure the correct order event was created
# is the event the expected type
assert payment_authorized_event.type == OrderEvents.PAYMENT_AUTHORIZED
# is the user anonymous/ the customer
assert payment_authorized_event.user == checkout_user
# is the associated backref order valid
assert payment_authorized_event.order is order
# ensure a date was set
assert payment_authorized_event.date
    # should carry the payment amount, id and gateway as parameters
assert "amount" in payment_authorized_event.parameters.keys()
assert "payment_id" in payment_authorized_event.parameters.keys()
assert "payment_gateway" in payment_authorized_event.parameters.keys()
expected_payload = {
"order": get_default_order_payload(order, checkout.redirect_url),
"recipient_email": order.get_customer_email(),
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
# Ensure the correct order confirmed event was created
# should be order confirmed event
assert order_confirmed_event.type == OrderEvents.CONFIRMED
# ensure the user is checkout user
assert order_confirmed_event.user == checkout_user
# ensure the order confirmed event is related to order
assert order_confirmed_event.order is order
# ensure a date was set
assert order_confirmed_event.date
# ensure the event parameters are empty
assert order_confirmed_event.parameters == {}
mock_notify.assert_called_once_with(
NotifyEventType.ORDER_CONFIRMATION,
expected_payload,
channel_slug=channel_USD.slug,
)
# Ensure the correct customer event was created if the user was not anonymous
placement_event = customer_user.events.get() # type: CustomerEvent
assert placement_event.type == CustomerEvents.PLACED_ORDER # check the event type
assert placement_event.user == customer_user # check the backref is valid
assert placement_event.order == order # check the associated order is valid
assert placement_event.date # ensure a date was set
assert not placement_event.parameters # should not have any additional parameters
@mock.patch("saleor.plugins.manager.PluginsManager.notify")
def test_create_order_preauth_payment_creates_expected_events_anonymous_user(
mock_notify,
checkout_with_item,
customer_user,
shipping_method,
payment_txn_preauth,
channel_USD,
):
checkout = checkout_with_item
checkout_user = None
    # Ensure no events exist beforehand
assert not OrderEvent.objects.exists()
assert not CustomerEvent.objects.exists()
# Prepare valid checkout
checkout.user = checkout_user
checkout.email = "[email protected]"
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_shipping_address
checkout.shipping_method = shipping_method
checkout.payments.add(payment_txn_preauth)
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
# Place checkout
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=AnonymousUser(),
app=None,
manager=manager,
)
flush_post_commit_hooks()
(
order_placed_event,
payment_captured_event,
order_confirmed_event,
) = order.events.all() # type: OrderEvent
# Ensure the correct order event was created
# is the event the expected type
assert order_placed_event.type == OrderEvents.PLACED
# is the user anonymous/ the customer
assert order_placed_event.user == checkout_user
# is the associated backref order valid
assert order_placed_event.order is order
# ensure a date was set
assert order_placed_event.date
# should not have any additional parameters
assert not order_placed_event.parameters
# Ensure the correct order event was created
# is the event the expected type
assert payment_captured_event.type == OrderEvents.PAYMENT_AUTHORIZED
# is the user anonymous/ the customer
assert payment_captured_event.user == checkout_user
# is the associated backref order valid
assert payment_captured_event.order is order
# ensure a date was set
assert payment_captured_event.date
    # should carry the payment amount, id and gateway as parameters
assert "amount" in payment_captured_event.parameters.keys()
assert "payment_id" in payment_captured_event.parameters.keys()
assert "payment_gateway" in payment_captured_event.parameters.keys()
expected_payload = {
"order": get_default_order_payload(order, checkout.redirect_url),
"recipient_email": order.get_customer_email(),
"site_name": "mirumee.com",
"domain": "mirumee.com",
}
# Ensure the correct order confirmed event was created
# should be order confirmed event
assert order_confirmed_event.type == OrderEvents.CONFIRMED
# ensure the user is checkout user
assert order_confirmed_event.user == checkout_user
# ensure the order confirmed event is related to order
assert order_confirmed_event.order is order
# ensure a date was set
assert order_confirmed_event.date
# ensure the event parameters are empty
assert order_confirmed_event.parameters == {}
mock_notify.assert_called_once_with(
NotifyEventType.ORDER_CONFIRMATION,
expected_payload,
channel_slug=channel_USD.slug,
)
# Check no event was created if the user was anonymous
assert not CustomerEvent.objects.exists() # should not have created any event
def test_create_order_insufficient_stock(
checkout, customer_user, product_without_shipping
):
variant = product_without_shipping.variants.get()
manager = get_plugins_manager()
checkout_info = fetch_checkout_info(checkout, [], [], manager)
add_variant_to_checkout(checkout_info, variant, 10, check_quantity=False)
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.tracking_code = "tracking_code"
checkout.save()
lines = fetch_checkout_lines(checkout)
with pytest.raises(InsufficientStock):
_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
)
def test_create_order_doesnt_duplicate_order(
checkout_with_item, customer_user, shipping_method
):
checkout = checkout_with_item
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = ""
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
order_data = _prepare_order_data(
manager=manager, checkout_info=checkout_info, lines=lines, discounts=None
)
order_1 = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=order_data,
user=customer_user,
app=None,
manager=manager,
)
assert order_1.checkout_token == checkout.token
order_2 = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=order_data,
user=customer_user,
app=None,
manager=manager,
)
assert order_1.pk == order_2.pk
@pytest.mark.parametrize("is_anonymous_user", (True, False))
def test_create_order_with_gift_card(
checkout_with_gift_card, customer_user, shipping_method, is_anonymous_user
):
checkout_user = None if is_anonymous_user else customer_user
checkout = checkout_with_gift_card
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
total_gross_without_gift_cards = (
subtotal.gross + shipping_price.gross - checkout.discount
)
gift_cards_balance = checkout.get_total_gift_cards_balance()
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user if not is_anonymous_user else AnonymousUser(),
app=None,
manager=manager,
)
assert order.gift_cards.count() == 1
gift_card = order.gift_cards.first()
assert gift_card.current_balance.amount == 0
assert order.total.gross == (total_gross_without_gift_cards - gift_cards_balance)
assert GiftCardEvent.objects.filter(
gift_card=gift_card, type=GiftCardEvents.USED_IN_ORDER
)
def test_create_order_with_gift_card_partial_use(
checkout_with_item, gift_card_used, customer_user, shipping_method
):
checkout = checkout_with_item
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
price_without_gift_card = calculations.checkout_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
gift_card_balance_before_order = gift_card_used.current_balance_amount
checkout.gift_cards.add(gift_card_used)
checkout.save()
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user,
app=None,
manager=manager,
)
gift_card_used.refresh_from_db()
expected_old_balance = (
price_without_gift_card.gross.amount + gift_card_used.current_balance_amount
)
assert order.gift_cards.count() > 0
assert order.total == zero_taxed_money(order.currency)
assert gift_card_balance_before_order == expected_old_balance
assert GiftCardEvent.objects.filter(
gift_card=gift_card_used, type=GiftCardEvents.USED_IN_ORDER
)
def test_create_order_with_many_gift_cards(
checkout_with_item,
gift_card_created_by_staff,
gift_card,
customer_user,
shipping_method,
):
checkout = checkout_with_item
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
price_without_gift_card = calculations.checkout_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
gift_cards_balance_before_order = (
gift_card_created_by_staff.current_balance.amount
+ gift_card.current_balance.amount
)
checkout.gift_cards.add(gift_card_created_by_staff)
checkout.gift_cards.add(gift_card)
checkout.save()
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user,
app=None,
manager=manager,
)
gift_card_created_by_staff.refresh_from_db()
gift_card.refresh_from_db()
zero_price = zero_money(gift_card.currency)
assert order.gift_cards.count() > 0
assert gift_card_created_by_staff.current_balance == zero_price
assert gift_card.current_balance == zero_price
assert price_without_gift_card.gross.amount == (
gift_cards_balance_before_order + order.total.gross.amount
)
assert GiftCardEvent.objects.filter(
gift_card=gift_card_created_by_staff, type=GiftCardEvents.USED_IN_ORDER
)
assert GiftCardEvent.objects.filter(
gift_card=gift_card, type=GiftCardEvents.USED_IN_ORDER
)
@mock.patch("saleor.giftcard.utils.send_gift_card_notification")
@pytest.mark.parametrize("is_anonymous_user", (True, False))
def test_create_order_gift_card_bought(
send_notification_mock,
checkout_with_gift_card_items,
customer_user,
shipping_method,
is_anonymous_user,
non_shippable_gift_card_product,
):
checkout_user = None if is_anonymous_user else customer_user
checkout = checkout_with_gift_card_items
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
total_gross = subtotal.gross + shipping_price.gross - checkout.discount
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user if not is_anonymous_user else AnonymousUser(),
app=None,
manager=manager,
)
assert order.total.gross == total_gross
flush_post_commit_hooks()
gift_card = GiftCard.objects.get()
assert (
gift_card.initial_balance
== order.lines.get(
variant=non_shippable_gift_card_product.variants.first()
).unit_price_gross
)
assert GiftCardEvent.objects.filter(gift_card=gift_card, type=GiftCardEvents.BOUGHT)
send_notification_mock.assert_called_once_with(
checkout_user,
None,
checkout_user,
order.user_email,
gift_card,
manager,
order.channel.slug,
resending=False,
)
@mock.patch("saleor.giftcard.utils.send_gift_card_notification")
@pytest.mark.parametrize("is_anonymous_user", (True, False))
def test_create_order_gift_card_bought_only_shippable_gift_card(
send_notification_mock,
checkout,
shippable_gift_card_product,
customer_user,
shipping_method,
is_anonymous_user,
):
checkout_user = None if is_anonymous_user else customer_user
checkout_info = fetch_checkout_info(checkout, [], [], get_plugins_manager())
shippable_variant = shippable_gift_card_product.variants.get()
add_variant_to_checkout(checkout_info, shippable_variant, 2)
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
total_gross = subtotal.gross + shipping_price.gross - checkout.discount
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user if not is_anonymous_user else AnonymousUser(),
app=None,
manager=manager,
)
assert order.total.gross == total_gross
assert not GiftCard.objects.all()
send_notification_mock.assert_not_called()
@pytest.mark.parametrize("is_anonymous_user", (True, False))
def test_create_order_gift_card_bought_do_not_fulfill_gift_cards_automatically(
site_settings,
checkout_with_gift_card_items,
customer_user,
shipping_method,
is_anonymous_user,
non_shippable_gift_card_product,
):
site_settings.automatically_fulfill_non_shippable_gift_card = False
site_settings.save(update_fields=["automatically_fulfill_non_shippable_gift_card"])
checkout_user = None if is_anonymous_user else customer_user
checkout = checkout_with_gift_card_items
checkout.user = checkout_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = "tracking_code"
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=checkout.shipping_address,
)
total_gross = subtotal.gross + shipping_price.gross - checkout.discount
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user if not is_anonymous_user else AnonymousUser(),
app=None,
manager=manager,
)
assert order.total.gross == total_gross
assert not GiftCard.objects.all()
def test_note_in_created_order(checkout_with_item, address, customer_user):
checkout_with_item.shipping_address = address
checkout_with_item.note = "test_note"
checkout_with_item.tracking_code = "tracking_code"
checkout_with_item.redirect_url = "https://www.example.com"
checkout_with_item.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout_with_item)
checkout_info = fetch_checkout_info(checkout_with_item, lines, [], manager)
order = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=_prepare_order_data(
manager=manager,
checkout_info=checkout_info,
lines=lines,
discounts=None,
),
user=customer_user,
app=None,
manager=manager,
)
assert order.customer_note == checkout_with_item.note
def test_create_order_with_variant_tracking_false(
checkout, customer_user, variant_without_inventory_tracking
):
variant = variant_without_inventory_tracking
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.tracking_code = ""
checkout.redirect_url = "https://www.example.com"
checkout.save()
manager = get_plugins_manager()
checkout_info = fetch_checkout_info(checkout, [], [], manager)
add_variant_to_checkout(checkout_info, variant, 10, check_quantity=False)
lines = fetch_checkout_lines(checkout)
order_data = _prepare_order_data(
manager=manager, checkout_info=checkout_info, lines=lines, discounts=None
)
order_1 = _create_order(
checkout_info=checkout_info,
checkout_lines=lines,
order_data=order_data,
user=customer_user,
app=None,
manager=manager,
)
assert order_1.checkout_token == checkout.token
@override_settings(LANGUAGE_CODE="fr")
def test_create_order_use_translations(
checkout_with_item, customer_user, shipping_method
):
translated_product_name = "French name"
translated_variant_name = "French variant name"
checkout = checkout_with_item
checkout.user = customer_user
checkout.billing_address = customer_user.default_billing_address
checkout.shipping_address = customer_user.default_billing_address
checkout.shipping_method = shipping_method
checkout.tracking_code = ""
checkout.redirect_url = "https://www.example.com"
checkout.language_code = "fr"
checkout.save()
manager = get_plugins_manager()
lines = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
variant = lines[0].variant
product = lines[0].product
ProductTranslation.objects.create(
language_code="fr",
product=product,
name=translated_product_name,
)
ProductVariantTranslation.objects.create(
language_code="fr",
product_variant=variant,
name=translated_variant_name,
)
order_data = _prepare_order_data(
manager=manager, checkout_info=checkout_info, lines=lines, discounts=None
)
order_line = order_data["lines"][0].line
assert order_line.translated_product_name == translated_product_name
assert order_line.translated_variant_name == translated_variant_name
|